Source file: core/stdarch/crates/core_arch/src/aarch64/neon/generated.rs

1// This code is automatically generated. DO NOT MODIFY.
2//
3// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
4//
5// ```
6// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
7// ```
8#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // Thin wrapper: all of the work is done by the LLVM intrinsic named in
    // `link_name` below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    // SAFETY: `target_feature(enable = "crc")` on this function guarantees the
    // CRC extension is available whenever this can be called.
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // Same shape as `__crc32cd` above, but using the CRC32 (not CRC32-C)
    // polynomial via `llvm.aarch64.crc32x`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    // SAFETY: gated by the `crc` target feature enabled on this function.
    unsafe { ___crc32d(crc, data) }
}
#[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"]
#[inline(always)]
#[target_feature(enable = "jsconv")]
#[cfg_attr(test, assert_instr(fjcvtzs))]
// NOTE(review): `CURRENT_RUSTC_VERSION` looks like a placeholder substituted
// by release tooling when this intrinsic stabilizes — confirm with the
// stdarch/rustc release process.
#[stable(feature = "stdarch_aarch64_jscvt", since = "CURRENT_RUSTC_VERSION")]
pub fn __jcvt(a: f64) -> i32 {
    // Wrapper around the FJCVTZS instruction exposed as an LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.fjcvtzs"
        )]
        fn ___jcvt(a: f64) -> i32;
    }
    // SAFETY: gated by the `jsconv` target feature enabled on this function.
    unsafe { ___jcvt(a) }
}
63#[doc = "Signed Absolute difference and Accumulate Long"]
64#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
65#[inline(always)]
66#[target_feature(enable = "neon")]
67#[stable(feature = "neon_intrinsics", since = "1.59.0")]
68#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
69pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
70    unsafe {
71        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
72        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
73        let f: int8x8_t = vabd_s8(d, e);
74        let f: uint8x8_t = simd_cast(f);
75        simd_add(a, simd_cast(f))
76    }
77}
78#[doc = "Signed Absolute difference and Accumulate Long"]
79#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
80#[inline(always)]
81#[target_feature(enable = "neon")]
82#[stable(feature = "neon_intrinsics", since = "1.59.0")]
83#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
84pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
85    unsafe {
86        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
87        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
88        let f: int16x4_t = vabd_s16(d, e);
89        let f: uint16x4_t = simd_cast(f);
90        simd_add(a, simd_cast(f))
91    }
92}
93#[doc = "Signed Absolute difference and Accumulate Long"]
94#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
95#[inline(always)]
96#[target_feature(enable = "neon")]
97#[stable(feature = "neon_intrinsics", since = "1.59.0")]
98#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
99pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
100    unsafe {
101        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
102        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
103        let f: int32x2_t = vabd_s32(d, e);
104        let f: uint32x2_t = simd_cast(f);
105        simd_add(a, simd_cast(f))
106    }
107}
108#[doc = "Unsigned Absolute difference and Accumulate Long"]
109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
110#[inline(always)]
111#[target_feature(enable = "neon")]
112#[stable(feature = "neon_intrinsics", since = "1.59.0")]
113#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
114pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
115    unsafe {
116        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
117        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
118        let f: uint8x8_t = vabd_u8(d, e);
119        simd_add(a, simd_cast(f))
120    }
121}
122#[doc = "Unsigned Absolute difference and Accumulate Long"]
123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
124#[inline(always)]
125#[target_feature(enable = "neon")]
126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
127#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
128pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
129    unsafe {
130        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
131        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
132        let f: uint16x4_t = vabd_u16(d, e);
133        simd_add(a, simd_cast(f))
134    }
135}
136#[doc = "Unsigned Absolute difference and Accumulate Long"]
137#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
138#[inline(always)]
139#[target_feature(enable = "neon")]
140#[stable(feature = "neon_intrinsics", since = "1.59.0")]
141#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
142pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
143    unsafe {
144        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
145        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
146        let f: uint32x2_t = vabd_u32(d, e);
147        simd_add(a, simd_cast(f))
148    }
149}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Thin wrapper over the LLVM `fabd` intrinsic for a 1-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 2-lane (quad-register) version of `vabd_f64` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: splat each scalar into a 1-lane vector, take the vector
    // absolute difference, and extract lane 0.
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar f32 form, same splat/compute/extract pattern as `vabdd_f64`.
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
// f16 support is excluded on arm64ec (Windows ARM emulation-compatible ABI).
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 form, same splat/compute/extract pattern as `vabdd_f64`.
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
210#[doc = "Signed Absolute difference Long"]
211#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
212#[inline(always)]
213#[target_feature(enable = "neon")]
214#[stable(feature = "neon_intrinsics", since = "1.59.0")]
215#[cfg_attr(test, assert_instr(sabdl2))]
216pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
217    unsafe {
218        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
219        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
220        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
221        simd_cast(e)
222    }
223}
224#[doc = "Signed Absolute difference Long"]
225#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
226#[inline(always)]
227#[target_feature(enable = "neon")]
228#[stable(feature = "neon_intrinsics", since = "1.59.0")]
229#[cfg_attr(test, assert_instr(sabdl2))]
230pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
231    unsafe {
232        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
233        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
234        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
235        simd_cast(e)
236    }
237}
238#[doc = "Signed Absolute difference Long"]
239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
240#[inline(always)]
241#[target_feature(enable = "neon")]
242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
243#[cfg_attr(test, assert_instr(sabdl2))]
244pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
245    unsafe {
246        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
247        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
248        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
249        simd_cast(e)
250    }
251}
252#[doc = "Unsigned Absolute difference Long"]
253#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
254#[inline(always)]
255#[target_feature(enable = "neon")]
256#[cfg_attr(test, assert_instr(uabdl2))]
257#[stable(feature = "neon_intrinsics", since = "1.59.0")]
258pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
259    unsafe {
260        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
261        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
262        simd_cast(vabd_u8(c, d))
263    }
264}
265#[doc = "Unsigned Absolute difference Long"]
266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
267#[inline(always)]
268#[target_feature(enable = "neon")]
269#[cfg_attr(test, assert_instr(uabdl2))]
270#[stable(feature = "neon_intrinsics", since = "1.59.0")]
271pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
272    unsafe {
273        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
274        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
275        simd_cast(vabd_u16(c, d))
276    }
277}
278#[doc = "Unsigned Absolute difference Long"]
279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
280#[inline(always)]
281#[target_feature(enable = "neon")]
282#[cfg_attr(test, assert_instr(uabdl2))]
283#[stable(feature = "neon_intrinsics", since = "1.59.0")]
284pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
285    unsafe {
286        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
287        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
288        simd_cast(vabd_u32(c, d))
289    }
290}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise |x| via the generic `simd_fabs` intrinsic (1 lane here).
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise |x| via the generic `simd_fabs` intrinsic (2 lanes here).
    unsafe { simd_fabs(a) }
}
309#[doc = "Absolute Value (wrapping)."]
310#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
311#[inline(always)]
312#[target_feature(enable = "neon")]
313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
314#[cfg_attr(test, assert_instr(abs))]
315pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
316    unsafe {
317        let neg: int64x1_t = simd_neg(a);
318        let mask: int64x1_t = simd_ge(a, neg);
319        simd_select(mask, a, neg)
320    }
321}
322#[doc = "Absolute Value (wrapping)."]
323#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
324#[inline(always)]
325#[target_feature(enable = "neon")]
326#[stable(feature = "neon_intrinsics", since = "1.59.0")]
327#[cfg_attr(test, assert_instr(abs))]
328pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
329    unsafe {
330        let neg: int64x2_t = simd_neg(a);
331        let mask: int64x2_t = simd_ge(a, neg);
332        simd_select(mask, a, neg)
333    }
334}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    // Scalar i64 absolute value via the LLVM NEON `abs` intrinsic
    // (wrapping, per the doc above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vabsd_s64(a) }
}
351#[doc = "Add"]
352#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
353#[inline(always)]
354#[target_feature(enable = "neon")]
355#[stable(feature = "neon_intrinsics", since = "1.59.0")]
356#[cfg_attr(test, assert_instr(nop))]
357pub fn vaddd_s64(a: i64, b: i64) -> i64 {
358    a.wrapping_add(b)
359}
360#[doc = "Add"]
361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
362#[inline(always)]
363#[target_feature(enable = "neon")]
364#[stable(feature = "neon_intrinsics", since = "1.59.0")]
365#[cfg_attr(test, assert_instr(nop))]
366pub fn vaddd_u64(a: u64, b: u64) -> u64 {
367    a.wrapping_add(b)
368}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Widening sum of all lanes via the LLVM `saddlv` reduction intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // 8-lane variant of `vaddlv_s16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // 32-bit lanes widen to a 64-bit result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Expected instruction is SADDLP, not SADDLV: for a 2-lane vector the
// across-vector long add reduces to a single pairwise add — presumably why
// the generator emits this assertion.
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // The LLVM intrinsic returns i32; the sum of 8 sign-extended i8 lanes
    // lies in [-1024, 1016], so the narrowing cast to i16 is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // 16-lane variant; sum of 16 i8 lanes lies in [-2048, 2032], so the
    // i32 -> i16 cast is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Unsigned counterpart of `vaddlv_s16` using `uaddlv`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // 8-lane variant of `vaddlv_u16`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // 32-bit lanes widen to a 64-bit result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Expected instruction is UADDLP (pairwise) for the 2-lane case, mirroring
// `vaddlv_s32` above.
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // NOTE: the extern declaration returns i32 (matching the LLVM intrinsic
    // signature); the sum of 8 u8 lanes is at most 2040, so `as u16` is a
    // lossless narrowing.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // 16-lane variant; the sum is at most 4080, so `as u16` is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    // Horizontal float sum via the LLVM `faddv` reduction intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    // 4-lane variant of `vaddv_f32`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    // 2-lane f64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: gated by the `neon` target feature enabled on this function.
    unsafe { _vaddvq_f64(a) }
}
609#[doc = "Add across vector"]
610#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
611#[inline(always)]
612#[target_feature(enable = "neon")]
613#[stable(feature = "neon_intrinsics", since = "1.59.0")]
614#[cfg_attr(test, assert_instr(addp))]
615pub fn vaddv_s32(a: int32x2_t) -> i32 {
616    unsafe { simd_reduce_add_ordered(a, 0) }
617}
618#[doc = "Add across vector"]
619#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
620#[inline(always)]
621#[target_feature(enable = "neon")]
622#[stable(feature = "neon_intrinsics", since = "1.59.0")]
623#[cfg_attr(test, assert_instr(addv))]
624pub fn vaddv_s8(a: int8x8_t) -> i8 {
625    unsafe { simd_reduce_add_ordered(a, 0) }
626}
627#[doc = "Add across vector"]
628#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
629#[inline(always)]
630#[target_feature(enable = "neon")]
631#[stable(feature = "neon_intrinsics", since = "1.59.0")]
632#[cfg_attr(test, assert_instr(addv))]
633pub fn vaddvq_s8(a: int8x16_t) -> i8 {
634    unsafe { simd_reduce_add_ordered(a, 0) }
635}
636#[doc = "Add across vector"]
637#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
638#[inline(always)]
639#[target_feature(enable = "neon")]
640#[stable(feature = "neon_intrinsics", since = "1.59.0")]
641#[cfg_attr(test, assert_instr(addv))]
642pub fn vaddv_s16(a: int16x4_t) -> i16 {
643    unsafe { simd_reduce_add_ordered(a, 0) }
644}
645#[doc = "Add across vector"]
646#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
647#[inline(always)]
648#[target_feature(enable = "neon")]
649#[stable(feature = "neon_intrinsics", since = "1.59.0")]
650#[cfg_attr(test, assert_instr(addv))]
651pub fn vaddvq_s16(a: int16x8_t) -> i16 {
652    unsafe { simd_reduce_add_ordered(a, 0) }
653}
654#[doc = "Add across vector"]
655#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
656#[inline(always)]
657#[target_feature(enable = "neon")]
658#[stable(feature = "neon_intrinsics", since = "1.59.0")]
659#[cfg_attr(test, assert_instr(addv))]
660pub fn vaddvq_s32(a: int32x4_t) -> i32 {
661    unsafe { simd_reduce_add_ordered(a, 0) }
662}
663#[doc = "Add across vector"]
664#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
665#[inline(always)]
666#[target_feature(enable = "neon")]
667#[stable(feature = "neon_intrinsics", since = "1.59.0")]
668#[cfg_attr(test, assert_instr(addp))]
669pub fn vaddv_u32(a: uint32x2_t) -> u32 {
670    unsafe { simd_reduce_add_ordered(a, 0) }
671}
672#[doc = "Add across vector"]
673#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
674#[inline(always)]
675#[target_feature(enable = "neon")]
676#[stable(feature = "neon_intrinsics", since = "1.59.0")]
677#[cfg_attr(test, assert_instr(addv))]
678pub fn vaddv_u8(a: uint8x8_t) -> u8 {
679    unsafe { simd_reduce_add_ordered(a, 0) }
680}
681#[doc = "Add across vector"]
682#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
683#[inline(always)]
684#[target_feature(enable = "neon")]
685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
686#[cfg_attr(test, assert_instr(addv))]
687pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
688    unsafe { simd_reduce_add_ordered(a, 0) }
689}
690#[doc = "Add across vector"]
691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
692#[inline(always)]
693#[target_feature(enable = "neon")]
694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
695#[cfg_attr(test, assert_instr(addv))]
696pub fn vaddv_u16(a: uint16x4_t) -> u16 {
697    unsafe { simd_reduce_add_ordered(a, 0) }
698}
699#[doc = "Add across vector"]
700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
701#[inline(always)]
702#[target_feature(enable = "neon")]
703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
704#[cfg_attr(test, assert_instr(addv))]
705pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
706    unsafe { simd_reduce_add_ordered(a, 0) }
707}
708#[doc = "Add across vector"]
709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
710#[inline(always)]
711#[target_feature(enable = "neon")]
712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
713#[cfg_attr(test, assert_instr(addv))]
714pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
715    unsafe { simd_reduce_add_ordered(a, 0) }
716}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 64-bit lanes reduce with ADDP (pairwise add) rather than ADDV.
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // SAFETY: ordered SIMD add-reduction is defined for this vector type;
    // NEON availability is guaranteed by `#[target_feature]` above.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 64-bit lanes reduce with ADDP (pairwise add) rather than ADDV.
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    // SAFETY: ordered SIMD add-reduction is defined for this vector type;
    // NEON availability is guaranteed by `#[target_feature]` above.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f16"
        )]
        fn _vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vamax_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v8f16"
        )]
        fn _vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vamaxq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f16"
        )]
        fn _vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vamin_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v8f16"
        )]
        fn _vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vaminq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vaminq_f64(a, b) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    // The intrinsic yields a widened i32, which is truncated to the
    // 16-bit result expected by this intrinsic.
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the local declaration's signature matches the LLVM intrinsic
    // bound via `link_name`; required target features are enabled above.
    // The intrinsic yields a widened i32, which is truncated to the
    // 16-bit result expected by this intrinsic.
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is |b| >= |a|: delegate to vcage with swapped operands.
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| <= |b| is |b| >= |a|: delegate to vcageq with swapped operands.
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // |a| <= |b| is |b| >= |a|: delegate to vcaged with swapped operands.
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // |a| <= |b| is |b| >= |a|: delegate to vcages with swapped operands.
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // |a| <= |b| is |b| >= |a|: delegate to vcageh with swapped operands.
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is |b| > |a|: delegate to vcagt with swapped operands.
    vcagt_f64(b, a)
}
1408#[doc = "Floating-point absolute compare less than"]
1409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
1410#[inline(always)]
1411#[target_feature(enable = "neon")]
1412#[cfg_attr(test, assert_instr(facgt))]
1413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1414pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1415    vcagtq_f64(b, a)
1416}
1417#[doc = "Floating-point absolute compare less than"]
1418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
1419#[inline(always)]
1420#[target_feature(enable = "neon")]
1421#[cfg_attr(test, assert_instr(facgt))]
1422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1423pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
1424    vcagtd_f64(b, a)
1425}
1426#[doc = "Floating-point absolute compare less than"]
1427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
1428#[inline(always)]
1429#[target_feature(enable = "neon")]
1430#[cfg_attr(test, assert_instr(facgt))]
1431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1432pub fn vcalts_f32(a: f32, b: f32) -> u32 {
1433    vcagts_f32(b, a)
1434}
1435#[doc = "Floating-point absolute compare less than"]
1436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
1437#[inline(always)]
1438#[cfg_attr(test, assert_instr(facgt))]
1439#[target_feature(enable = "neon,fp16")]
1440#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1441#[cfg(not(target_arch = "arm64ec"))]
1442pub fn vcalth_f16(a: f16, b: f16) -> u16 {
1443    vcagth_f16(b, a)
1444}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise equality: a lane that compares equal becomes all-ones,
    // otherwise all-zeros (NEON mask convention).
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) variant of the lane-wise equality above.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Integer lanes: bitwise equality via CMEQ, producing a lane mask.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) signed variant.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned 1-lane variant; same bitwise lane mask semantics.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Unsigned 2-lane variant.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // Polynomial lanes compare bitwise, same as the integer variants.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // Polynomial 2-lane variant.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: splat both scalars into 1-lane vectors, run the vector
    // compare, then extract lane 0 of the resulting mask (0 or u64::MAX).
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form: splat, vector-compare, extract lane 0.
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    // transmute is sound here: i64/u64 and the 64-bit single-lane vector
    // types have identical size and bit layout.
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    // Same size-preserving transmute round-trip as vceqd_s64.
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form: splat, vector-compare, extract lane 0.
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
1563#[doc = "Floating-point compare bitwise equal to zero"]
1564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
1565#[inline(always)]
1566#[cfg_attr(test, assert_instr(fcmeq))]
1567#[target_feature(enable = "neon,fp16")]
1568#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
1569#[cfg(not(target_arch = "arm64ec"))]
1570pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
1571    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
1572    unsafe { simd_eq(a, transmute(b)) }
1573}
1574#[doc = "Floating-point compare bitwise equal to zero"]
1575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
1576#[inline(always)]
1577#[cfg_attr(test, assert_instr(fcmeq))]
1578#[target_feature(enable = "neon,fp16")]
1579#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
1580#[cfg(not(target_arch = "arm64ec"))]
1581pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
1582    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
1583    unsafe { simd_eq(a, transmute(b)) }
1584}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane against a freshly-built zero vector; equal lanes
    // become all-ones, others all-zeros.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    // 4-lane variant of the zero compare above.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a bare f64 zero is transmuted to the 1-lane vector
    // (identical 8-byte layout).
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    // 2-lane f64 variant.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    // Compare each lane against an all-zero vector; zero lanes become
    // all-ones in the result mask, non-zero lanes all-zeros.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    // 16-lane variant of the zero compare above.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    // 8-lane i16 variant.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    // 4-lane i32 variant.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    // 1-lane i64 variant.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    // 2-lane i64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    // Polynomial lanes compare bitwise; the zero vector is built from the
    // plain-integer splat type and transmuted (same size and layout).
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    // 16-lane poly8 variant.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    // 1-lane poly64 variant.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    // 2-lane poly64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    // Compare each lane against an all-zero vector; zero lanes become
    // all-ones in the result mask, non-zero lanes all-zeros.
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    // 16-lane u8 variant.
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    // 4-lane u16 variant.
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    // 8-lane u16 variant.
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    // 2-lane u32 variant.
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    // 4-lane u32 variant.
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    // 1-lane u64 variant.
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    // 2-lane u64 variant.
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    // Scalar form: transmute to/from the 64-bit 1-lane vector (identical
    // size and layout) and reuse the vector zero-compare.
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    // Same transmute round-trip as vceqzd_s64, unsigned input.
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzh_f16(a: f16) -> u16 {
    // Scalar f16 form: splat to a vector, zero-compare, extract lane 0.
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    // Scalar f32 form: splat, zero-compare, extract lane 0.
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    // Scalar f64 form: splat, zero-compare, extract lane 0.
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise a >= b; true lanes become all-ones, false lanes all-zeros.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 2-lane f64 variant.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise a >= b (CMGE).
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 2-lane signed variant.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise a >= b; lowers to CMHS (unsigned higher-or-same).
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 2-lane unsigned variant.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    // Scalar form: splat both scalars into 1-lane vectors, run the vector
    // compare, then extract lane 0 of the mask (0 or u64::MAX).
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form: splat, vector-compare, extract lane 0.
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    // transmute is sound: i64/u64 and the 64-bit single-lane vector types
    // share size and bit layout.
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    // Same transmute round-trip as vcged_s64, unsigned inputs.
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form: splat, vector-compare, extract lane 0.
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    // Lane-wise a >= 0.0 against a freshly-built zero vector; true lanes
    // become all-ones, false lanes all-zeros.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    // 4-lane f32 variant.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a bare f64 zero transmutes to the 1-lane vector
    // (identical 8-byte layout).
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    // 2-lane f64 variant.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise a >= 0 against an all-zero vector; non-negative
    // lanes become all-ones in the result mask, negative lanes all-zeros.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    // 16-lane i8 variant.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    // 8-lane i16 variant.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    // 4-lane i32 variant.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    // 1-lane i64 variant.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    // 2-lane i64 variant.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    // SAFETY: the scalar and 1-lane vector types are all 64 bits wide, so the transmutes are sound.
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgezh_f16(a: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    // SAFETY: lane 0 always exists in the duplicated vectors.
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated vectors.
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    // SAFETY: the scalar and 1-lane vector types are all 64 bits wide, so the transmutes are sound.
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    // SAFETY: the scalar and 1-lane vector types are all 64 bits wide, so the transmutes are sound.
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated vectors.
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: `f64` and the 1-lane vector type are both 64 bits; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    // SAFETY: the scalar and 1-lane vector types are all 64 bits wide, so the transmutes are sound.
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgtzh_f16(a: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of two vectors of identical type.
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    // SAFETY: lane 0 always exists in the duplicated vectors.
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated vectors.
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    // SAFETY: the scalar and 1-lane vector types are all 64 bits wide, so the transmutes are sound.
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    // SAFETY: the scalar and 1-lane vector types are all 64 bits wide, so the transmutes are sound.
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated vectors.
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: `f64` and the 1-lane vector type are both 64 bits; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: the zero operand matches `a`'s size and lane layout; transmute and lane-wise compare are sound.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    // SAFETY: the scalar and 1-lane vector types are all 64 bits wide, so the transmutes are sound.
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclezh_f16(a: f16) -> u16 {
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
2642#[doc = "Floating-point compare less than"]
2643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
2644#[inline(always)]
2645#[target_feature(enable = "neon")]
2646#[cfg_attr(test, assert_instr(fcmgt))]
2647#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2648pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
2649    unsafe { simd_lt(a, b) }
2650}
2651#[doc = "Floating-point compare less than"]
2652#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
2653#[inline(always)]
2654#[target_feature(enable = "neon")]
2655#[cfg_attr(test, assert_instr(fcmgt))]
2656#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2657pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
2658    unsafe { simd_lt(a, b) }
2659}
2660#[doc = "Compare signed less than"]
2661#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
2662#[inline(always)]
2663#[target_feature(enable = "neon")]
2664#[cfg_attr(test, assert_instr(cmgt))]
2665#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2666pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
2667    unsafe { simd_lt(a, b) }
2668}
2669#[doc = "Compare signed less than"]
2670#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
2671#[inline(always)]
2672#[target_feature(enable = "neon")]
2673#[cfg_attr(test, assert_instr(cmgt))]
2674#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2675pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
2676    unsafe { simd_lt(a, b) }
2677}
2678#[doc = "Compare unsigned less than"]
2679#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
2680#[inline(always)]
2681#[target_feature(enable = "neon")]
2682#[cfg_attr(test, assert_instr(cmhi))]
2683#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2684pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
2685    unsafe { simd_lt(a, b) }
2686}
2687#[doc = "Compare unsigned less than"]
2688#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
2689#[inline(always)]
2690#[target_feature(enable = "neon")]
2691#[cfg_attr(test, assert_instr(cmhi))]
2692#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2693pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
2694    unsafe { simd_lt(a, b) }
2695}
2696#[doc = "Compare less than"]
2697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
2698#[inline(always)]
2699#[target_feature(enable = "neon")]
2700#[cfg_attr(test, assert_instr(cmp))]
2701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2702pub fn vcltd_u64(a: u64, b: u64) -> u64 {
2703    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
2704}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    // Scalar signed form: one-lane vector round-trip through `vclt_s64`;
    // the returned mask is all ones (u64::MAX) when `a < b`, else zero.
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    // Splat both scalars into 4-lane vectors, compare with `vclt_f16`, and
    // return lane 0 of the resulting mask.
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    // Splat both scalars into 2-lane vectors, compare with `vclt_f32`, and
    // return lane 0 of the resulting mask.
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    // Splat both scalars into 1-lane vectors, compare with `vclt_f64`, and
    // return lane 0 of the resulting mask.
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    // Lane-wise `a < 0.0`; `transmute` reinterprets the portable `f32x2`
    // zero vector as the NEON `float32x2_t` operand type.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    // Quad-register variant: lane-wise `a < 0.0` against a reinterpreted
    // portable zero vector.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane variant: the scalar 0.0 is reinterpreted directly as the
    // one-lane `float64x1_t` comparand.
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    // Quad-register variant: lane-wise `a < 0.0` against a reinterpreted
    // portable zero vector.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    // Lane-wise signed `a < 0` against a reinterpreted portable zero vector.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    // Quad-register variant: lane-wise signed `a < 0`.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    // Lane-wise signed `a < 0` against a reinterpreted portable zero vector.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    // Quad-register variant: lane-wise signed `a < 0`.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    // Lane-wise signed `a < 0` against a reinterpreted portable zero vector.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    // Quad-register variant: lane-wise signed `a < 0`.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    // Single-lane variant: lane-wise signed `a < 0`.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    // Quad-register variant: lane-wise signed `a < 0`.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    // Scalar form: splat, compare against zero with `vcltz_f64`, extract lane 0.
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    // Scalar form: splat, compare against zero with `vcltz_f32`, extract lane 0.
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    // Scalar form via a one-lane `vcltz_s64` round-trip. Per the
    // `assert_instr(asr)` above, codegen is expected to lower this to a
    // sign-bit-broadcasting arithmetic shift right rather than a compare.
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcltzh_f16(a: f16) -> u16 {
    // Scalar form: splat, compare against zero with `vcltz_f16`, extract lane 0.
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin binding to the AArch64 FCMLA intrinsic with rotation 0
    // (see the `.rot0` suffix in the link name); arguments pass through
    // unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Quad-register binding to the AArch64 FCMLA intrinsic, rotation 0.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding to the AArch64 FCMLA intrinsic, rotation 0, f32 double-register.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Quad-register binding to the AArch64 FCMLA intrinsic, rotation 0.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Quad-register binding to the AArch64 FCMLA intrinsic, rotation 0, f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE selects one of the two complex-number pairs in the 4-lane `c`
    // (hence the 1-bit assert). The shuffle broadcasts that pair — lanes
    // 2*LANE and 2*LANE+1 — across all of `c` before the base intrinsic runs.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // Broadcasts the complex pair LANE (lanes 2*LANE, 2*LANE+1) of the
    // 4-lane `c` into all four pairs of an 8-lane vector, then applies
    // the quad-register base intrinsic.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A 2-lane f32 vector holds exactly one complex pair, so LANE must be 0
    // and the shuffle is effectively the identity on `c`.
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 2-lane `c` holds a single complex pair (LANE must be 0); the
    // shuffle widens it to 4 lanes by duplicating that pair, then the
    // quad-register base intrinsic is applied.
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `laneq`: LANE indexes one of the four complex pairs in the 8-lane `c`
    // (2-bit assert). The shuffle narrows to 4 lanes by duplicating lanes
    // 2*LANE and 2*LANE+1, then the double-register base intrinsic runs.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // Broadcasts the complex pair selected by LANE (one of four, 2-bit
    // assert) across all four pairs of `c`, then applies the quad-register
    // base intrinsic.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE picks one of the two complex pairs in the 4-lane `c` (1-bit
    // assert); the shuffle extracts that pair into a 2-lane vector for the
    // double-register base intrinsic.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // Broadcasts the complex pair selected by LANE (one of two, 1-bit
    // assert) across both pairs of `c`, then applies the quad-register
    // base intrinsic.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding to the AArch64 FCMLA intrinsic with a 180-degree rotation
    // (the `.rot180` link-name suffix); arguments pass through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Quad-register binding to the AArch64 FCMLA intrinsic, rotation 180.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding to the AArch64 FCMLA intrinsic, rotation 180, f32 double-register.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Quad-register binding to the AArch64 FCMLA intrinsic, rotation 180.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Quad-register binding to the AArch64 FCMLA intrinsic, rotation 180, f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // Same lane-broadcast scheme as `vcmla_lane_f16` (pair LANE duplicated
    // across `c`), but delegating to the rotation-180 base intrinsic.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // Widens the 4-lane `c` to 8 lanes by broadcasting pair LANE into all
    // four pairs, then delegates to the rotation-180 quad-register intrinsic.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A 2-lane f32 vector holds a single complex pair, so LANE must be 0
    // and the shuffle is effectively the identity on `c`.
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 2-lane `c` holds one complex pair (LANE must be 0); the shuffle
    // widens it to 4 lanes by duplication, then the rotation-180
    // quad-register intrinsic is applied.
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `laneq`: LANE indexes one of the four complex pairs in the 8-lane `c`
    // (2-bit assert); the shuffle narrows to 4 lanes by duplicating that
    // pair before the rotation-180 base intrinsic runs.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // Broadcasts the complex pair selected by LANE (one of four, 2-bit
    // assert) across all four pairs of `c`, then applies the rotation-180
    // quad-register intrinsic.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE picks one of the two complex pairs in the 4-lane `c` (1-bit
    // assert); the shuffle extracts that pair into a 2-lane vector for the
    // rotation-180 double-register intrinsic.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the pair selected by LANE (elements 2*LANE and
        // 2*LANE + 1 of `c`) to both pairs, then reuse the non-lane wrapper.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 270) intrinsic declared below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 270) intrinsic declared below (8 x f16 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 270) intrinsic declared below (2 x f32 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 270) intrinsic declared below (4 x f32 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 270) intrinsic declared below (2 x f64 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the pair selected by LANE (elements 2*LANE and
        // 2*LANE + 1 of `c`) to both pairs, then reuse the non-lane wrapper.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` by broadcasting the pair selected by LANE (elements
        // 2*LANE and 2*LANE + 1) to all four output pairs, then reuse the
        // non-lane wrapper.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds exactly one complex (re, im) pair, so lane 0 is the only
    // valid selection.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE == 0 this shuffle is an identity; kept for uniformity
        // with the other generated lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one complex (re, im) pair, so lane 0 is
    // the only valid selection.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` by duplicating its single pair into both output pairs,
        // then reuse the non-lane wrapper.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex (re, im) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` by broadcasting the pair selected by LANE (elements
        // 2*LANE and 2*LANE + 1) to both output pairs, then reuse the
        // non-lane wrapper.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (re, im) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the pair selected by LANE (elements 2*LANE and
        // 2*LANE + 1 of `c`) to all four pairs, then reuse the non-lane wrapper.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the pair selected by LANE (elements 2*LANE and 2*LANE + 1)
        // into a 2-element vector, then reuse the non-lane wrapper.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the pair selected by LANE (elements 2*LANE and
        // 2*LANE + 1 of `c`) to both pairs, then reuse the non-lane wrapper.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 90) intrinsic declared below (4 x f16 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 90) intrinsic declared below (8 x f16 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 90) intrinsic declared below (2 x f32 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 90) intrinsic declared below (4 x f32 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Generated binding: forwards the arguments unchanged to the LLVM
    // AArch64 FCMLA (rotate 90) intrinsic declared below (2 x f64 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the pair selected by LANE (elements 2*LANE and
        // 2*LANE + 1 of `c`) to both pairs, then reuse the non-lane wrapper.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` by broadcasting the pair selected by LANE (elements
        // 2*LANE and 2*LANE + 1) to all four output pairs, then reuse the
        // non-lane wrapper.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds exactly one complex (re, im) pair, so lane 0 is the only
    // valid selection.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE == 0 this shuffle is an identity; kept for uniformity
        // with the other generated lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one complex (re, im) pair, so lane 0 is
    // the only valid selection.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` by duplicating its single pair into both output pairs,
        // then reuse the non-lane wrapper.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex (re, im) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` by broadcasting the pair selected by LANE (elements
        // 2*LANE and 2*LANE + 1) to both output pairs, then reuse the
        // non-lane wrapper.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (re, im) pairs, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the pair selected by LANE (elements 2*LANE and
        // 2*LANE + 1 of `c`) to all four pairs, then reuse the non-lane wrapper.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the pair selected by LANE (elements 2*LANE and 2*LANE + 1)
        // into a 2-element vector, then reuse the non-lane wrapper.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (re, im) pairs, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the pair selected by LANE (elements 2*LANE and
        // 2*LANE + 1 of `c`) to both pairs, then reuse the non-lane wrapper.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // In simd_shuffle's concatenated [a, b] index space, indices >= 2
        // select from `b`: each arm keeps `a` intact except at position
        // LANE1, which receives element LANE2 of `b` (index 2 + LANE2).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // Unreachable: LANE1 was masked to a single bit above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // In simd_shuffle's concatenated [a, b] index space, indices >= 8
        // select from `b`: each arm keeps `a` intact except at position
        // LANE1, which receives element LANE2 of `b` (index 8 + LANE2).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // Unreachable: LANE1 was masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // In simd_shuffle's concatenated [a, b] index space, indices >= 4
        // select from `b`: each arm keeps `a` intact except at position
        // LANE1, which receives element LANE2 of `b` (index 4 + LANE2).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // Unreachable: LANE1 was masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // In simd_shuffle's concatenated [a, b] index space, indices >= 2
        // select from `b`: each arm keeps `a` intact except at position
        // LANE1, which receives element LANE2 of `b` (index 2 + LANE2).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // Unreachable: LANE1 was masked to a single bit above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // In simd_shuffle's concatenated [a, b] index space, indices >= 8
        // select from `b`: each arm keeps `a` intact except at position
        // LANE1, which receives element LANE2 of `b` (index 8 + LANE2).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // Unreachable: LANE1 was masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // In simd_shuffle's concatenated [a, b] index space, indices >= 4
        // select from `b`: each arm keeps `a` intact except at position
        // LANE1, which receives element LANE2 of `b` (index 4 + LANE2).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // Unreachable: LANE1 was masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Shuffle indices must be const, so dispatch on LANE1; index 2 + LANE2
        // addresses lane LANE2 of `b` in the concatenated [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Shuffle indices must be const, so dispatch on LANE1; index 8 + LANE2
        // addresses lane LANE2 of `b` in the concatenated [a, b] operand.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b111, so only 0..=7 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Shuffle indices must be const, so dispatch on LANE1; index 4 + LANE2
        // addresses lane LANE2 of `b` in the concatenated [a, b] operand.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b11, so only 0..=3 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Self-concatenate `a` to 4 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Index 4 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Self-concatenate `a` to 16 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Index 16 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b111, so only 0..=7 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Self-concatenate `a` to 8 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Index 8 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b11, so only 0..=3 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Self-concatenate `a` to 4 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Index 4 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Self-concatenate `a` to 16 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Index 16 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b111, so only 0..=7 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Self-concatenate `a` to 8 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Index 8 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b11, so only 0..=3 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Self-concatenate `a` to 4 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Index 4 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Self-concatenate `a` to 16 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Index 16 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b111, so only 0..=7 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Self-concatenate `a` to 8 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Index 8 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b11, so only 0..=3 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Self-concatenate `b` to 4 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Index 4 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b11, so only 0..=3 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of
    // `b` (LANE2 must therefore be 0).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Self-concatenate the one-lane `b` to 2 lanes so both shuffle operands
    // have the same length; only lane 0 of `b` is ever selected below.
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Index 2 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of
    // `b` (LANE2 must therefore be 0).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Self-concatenate the one-lane `b` to 2 lanes so both shuffle operands
    // have the same length; only lane 0 of `b` is ever selected below.
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Index 2 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of
    // `b` (LANE2 must therefore be 0).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Self-concatenate the one-lane `b` to 2 lanes so both shuffle operands
    // have the same length; only lane 0 of `b` is ever selected below.
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Index 2 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of
    // `b` (LANE2 must therefore be 0).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Self-concatenate the one-lane `b` to 2 lanes so both shuffle operands
    // have the same length; only lane 0 of `b` is ever selected below.
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Index 2 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so only 0 or 1 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
4623#[doc = "Insert vector element from another vector element"]
4624#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
4625#[inline(always)]
4626#[target_feature(enable = "neon")]
4627#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4628#[rustc_legacy_const_generics(1, 3)]
4629#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4630pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
4631    static_assert_uimm_bits!(LANE1, 4);
4632    static_assert_uimm_bits!(LANE2, 3);
4633    let b: int8x16_t =
4634        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4635    unsafe {
4636        match LANE1 & 0b1111 {
4637            0 => simd_shuffle!(
4638                a,
4639                b,
4640                [
4641                    16 + LANE2 as u32,
4642                    1,
4643                    2,
4644                    3,
4645                    4,
4646                    5,
4647                    6,
4648                    7,
4649                    8,
4650                    9,
4651                    10,
4652                    11,
4653                    12,
4654                    13,
4655                    14,
4656                    15
4657                ]
4658            ),
4659            1 => simd_shuffle!(
4660                a,
4661                b,
4662                [
4663                    0,
4664                    16 + LANE2 as u32,
4665                    2,
4666                    3,
4667                    4,
4668                    5,
4669                    6,
4670                    7,
4671                    8,
4672                    9,
4673                    10,
4674                    11,
4675                    12,
4676                    13,
4677                    14,
4678                    15
4679                ]
4680            ),
4681            2 => simd_shuffle!(
4682                a,
4683                b,
4684                [
4685                    0,
4686                    1,
4687                    16 + LANE2 as u32,
4688                    3,
4689                    4,
4690                    5,
4691                    6,
4692                    7,
4693                    8,
4694                    9,
4695                    10,
4696                    11,
4697                    12,
4698                    13,
4699                    14,
4700                    15
4701                ]
4702            ),
4703            3 => simd_shuffle!(
4704                a,
4705                b,
4706                [
4707                    0,
4708                    1,
4709                    2,
4710                    16 + LANE2 as u32,
4711                    4,
4712                    5,
4713                    6,
4714                    7,
4715                    8,
4716                    9,
4717                    10,
4718                    11,
4719                    12,
4720                    13,
4721                    14,
4722                    15
4723                ]
4724            ),
4725            4 => simd_shuffle!(
4726                a,
4727                b,
4728                [
4729                    0,
4730                    1,
4731                    2,
4732                    3,
4733                    16 + LANE2 as u32,
4734                    5,
4735                    6,
4736                    7,
4737                    8,
4738                    9,
4739                    10,
4740                    11,
4741                    12,
4742                    13,
4743                    14,
4744                    15
4745                ]
4746            ),
4747            5 => simd_shuffle!(
4748                a,
4749                b,
4750                [
4751                    0,
4752                    1,
4753                    2,
4754                    3,
4755                    4,
4756                    16 + LANE2 as u32,
4757                    6,
4758                    7,
4759                    8,
4760                    9,
4761                    10,
4762                    11,
4763                    12,
4764                    13,
4765                    14,
4766                    15
4767                ]
4768            ),
4769            6 => simd_shuffle!(
4770                a,
4771                b,
4772                [
4773                    0,
4774                    1,
4775                    2,
4776                    3,
4777                    4,
4778                    5,
4779                    16 + LANE2 as u32,
4780                    7,
4781                    8,
4782                    9,
4783                    10,
4784                    11,
4785                    12,
4786                    13,
4787                    14,
4788                    15
4789                ]
4790            ),
4791            7 => simd_shuffle!(
4792                a,
4793                b,
4794                [
4795                    0,
4796                    1,
4797                    2,
4798                    3,
4799                    4,
4800                    5,
4801                    6,
4802                    16 + LANE2 as u32,
4803                    8,
4804                    9,
4805                    10,
4806                    11,
4807                    12,
4808                    13,
4809                    14,
4810                    15
4811                ]
4812            ),
4813            8 => simd_shuffle!(
4814                a,
4815                b,
4816                [
4817                    0,
4818                    1,
4819                    2,
4820                    3,
4821                    4,
4822                    5,
4823                    6,
4824                    7,
4825                    16 + LANE2 as u32,
4826                    9,
4827                    10,
4828                    11,
4829                    12,
4830                    13,
4831                    14,
4832                    15
4833                ]
4834            ),
4835            9 => simd_shuffle!(
4836                a,
4837                b,
4838                [
4839                    0,
4840                    1,
4841                    2,
4842                    3,
4843                    4,
4844                    5,
4845                    6,
4846                    7,
4847                    8,
4848                    16 + LANE2 as u32,
4849                    10,
4850                    11,
4851                    12,
4852                    13,
4853                    14,
4854                    15
4855                ]
4856            ),
4857            10 => simd_shuffle!(
4858                a,
4859                b,
4860                [
4861                    0,
4862                    1,
4863                    2,
4864                    3,
4865                    4,
4866                    5,
4867                    6,
4868                    7,
4869                    8,
4870                    9,
4871                    16 + LANE2 as u32,
4872                    11,
4873                    12,
4874                    13,
4875                    14,
4876                    15
4877                ]
4878            ),
4879            11 => simd_shuffle!(
4880                a,
4881                b,
4882                [
4883                    0,
4884                    1,
4885                    2,
4886                    3,
4887                    4,
4888                    5,
4889                    6,
4890                    7,
4891                    8,
4892                    9,
4893                    10,
4894                    16 + LANE2 as u32,
4895                    12,
4896                    13,
4897                    14,
4898                    15
4899                ]
4900            ),
4901            12 => simd_shuffle!(
4902                a,
4903                b,
4904                [
4905                    0,
4906                    1,
4907                    2,
4908                    3,
4909                    4,
4910                    5,
4911                    6,
4912                    7,
4913                    8,
4914                    9,
4915                    10,
4916                    11,
4917                    16 + LANE2 as u32,
4918                    13,
4919                    14,
4920                    15
4921                ]
4922            ),
4923            13 => simd_shuffle!(
4924                a,
4925                b,
4926                [
4927                    0,
4928                    1,
4929                    2,
4930                    3,
4931                    4,
4932                    5,
4933                    6,
4934                    7,
4935                    8,
4936                    9,
4937                    10,
4938                    11,
4939                    12,
4940                    16 + LANE2 as u32,
4941                    14,
4942                    15
4943                ]
4944            ),
4945            14 => simd_shuffle!(
4946                a,
4947                b,
4948                [
4949                    0,
4950                    1,
4951                    2,
4952                    3,
4953                    4,
4954                    5,
4955                    6,
4956                    7,
4957                    8,
4958                    9,
4959                    10,
4960                    11,
4961                    12,
4962                    13,
4963                    16 + LANE2 as u32,
4964                    15
4965                ]
4966            ),
4967            15 => simd_shuffle!(
4968                a,
4969                b,
4970                [
4971                    0,
4972                    1,
4973                    2,
4974                    3,
4975                    4,
4976                    5,
4977                    6,
4978                    7,
4979                    8,
4980                    9,
4981                    10,
4982                    11,
4983                    12,
4984                    13,
4985                    14,
4986                    16 + LANE2 as u32
4987                ]
4988            ),
4989            _ => unreachable_unchecked(),
4990        }
4991    }
4992}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Self-concatenate `b` to 8 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Index 8 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b111, so only 0..=7 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Self-concatenate `b` to 4 lanes so both shuffle operands have the same
    // length; the duplicated upper lanes are never selected below.
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Index 4 + LANE2 addresses lane LANE2 of `b` in the [a, b] operand.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b11, so only 0..=3 are reachable.
            _ => unreachable_unchecked(),
        }
    }
}
5045#[doc = "Insert vector element from another vector element"]
5046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
5047#[inline(always)]
5048#[target_feature(enable = "neon")]
5049#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5050#[rustc_legacy_const_generics(1, 3)]
5051#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5052pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
5053    a: uint8x16_t,
5054    b: uint8x8_t,
5055) -> uint8x16_t {
5056    static_assert_uimm_bits!(LANE1, 4);
5057    static_assert_uimm_bits!(LANE2, 3);
5058    let b: uint8x16_t =
5059        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5060    unsafe {
5061        match LANE1 & 0b1111 {
5062            0 => simd_shuffle!(
5063                a,
5064                b,
5065                [
5066                    16 + LANE2 as u32,
5067                    1,
5068                    2,
5069                    3,
5070                    4,
5071                    5,
5072                    6,
5073                    7,
5074                    8,
5075                    9,
5076                    10,
5077                    11,
5078                    12,
5079                    13,
5080                    14,
5081                    15
5082                ]
5083            ),
5084            1 => simd_shuffle!(
5085                a,
5086                b,
5087                [
5088                    0,
5089                    16 + LANE2 as u32,
5090                    2,
5091                    3,
5092                    4,
5093                    5,
5094                    6,
5095                    7,
5096                    8,
5097                    9,
5098                    10,
5099                    11,
5100                    12,
5101                    13,
5102                    14,
5103                    15
5104                ]
5105            ),
5106            2 => simd_shuffle!(
5107                a,
5108                b,
5109                [
5110                    0,
5111                    1,
5112                    16 + LANE2 as u32,
5113                    3,
5114                    4,
5115                    5,
5116                    6,
5117                    7,
5118                    8,
5119                    9,
5120                    10,
5121                    11,
5122                    12,
5123                    13,
5124                    14,
5125                    15
5126                ]
5127            ),
5128            3 => simd_shuffle!(
5129                a,
5130                b,
5131                [
5132                    0,
5133                    1,
5134                    2,
5135                    16 + LANE2 as u32,
5136                    4,
5137                    5,
5138                    6,
5139                    7,
5140                    8,
5141                    9,
5142                    10,
5143                    11,
5144                    12,
5145                    13,
5146                    14,
5147                    15
5148                ]
5149            ),
5150            4 => simd_shuffle!(
5151                a,
5152                b,
5153                [
5154                    0,
5155                    1,
5156                    2,
5157                    3,
5158                    16 + LANE2 as u32,
5159                    5,
5160                    6,
5161                    7,
5162                    8,
5163                    9,
5164                    10,
5165                    11,
5166                    12,
5167                    13,
5168                    14,
5169                    15
5170                ]
5171            ),
5172            5 => simd_shuffle!(
5173                a,
5174                b,
5175                [
5176                    0,
5177                    1,
5178                    2,
5179                    3,
5180                    4,
5181                    16 + LANE2 as u32,
5182                    6,
5183                    7,
5184                    8,
5185                    9,
5186                    10,
5187                    11,
5188                    12,
5189                    13,
5190                    14,
5191                    15
5192                ]
5193            ),
5194            6 => simd_shuffle!(
5195                a,
5196                b,
5197                [
5198                    0,
5199                    1,
5200                    2,
5201                    3,
5202                    4,
5203                    5,
5204                    16 + LANE2 as u32,
5205                    7,
5206                    8,
5207                    9,
5208                    10,
5209                    11,
5210                    12,
5211                    13,
5212                    14,
5213                    15
5214                ]
5215            ),
5216            7 => simd_shuffle!(
5217                a,
5218                b,
5219                [
5220                    0,
5221                    1,
5222                    2,
5223                    3,
5224                    4,
5225                    5,
5226                    6,
5227                    16 + LANE2 as u32,
5228                    8,
5229                    9,
5230                    10,
5231                    11,
5232                    12,
5233                    13,
5234                    14,
5235                    15
5236                ]
5237            ),
5238            8 => simd_shuffle!(
5239                a,
5240                b,
5241                [
5242                    0,
5243                    1,
5244                    2,
5245                    3,
5246                    4,
5247                    5,
5248                    6,
5249                    7,
5250                    16 + LANE2 as u32,
5251                    9,
5252                    10,
5253                    11,
5254                    12,
5255                    13,
5256                    14,
5257                    15
5258                ]
5259            ),
5260            9 => simd_shuffle!(
5261                a,
5262                b,
5263                [
5264                    0,
5265                    1,
5266                    2,
5267                    3,
5268                    4,
5269                    5,
5270                    6,
5271                    7,
5272                    8,
5273                    16 + LANE2 as u32,
5274                    10,
5275                    11,
5276                    12,
5277                    13,
5278                    14,
5279                    15
5280                ]
5281            ),
5282            10 => simd_shuffle!(
5283                a,
5284                b,
5285                [
5286                    0,
5287                    1,
5288                    2,
5289                    3,
5290                    4,
5291                    5,
5292                    6,
5293                    7,
5294                    8,
5295                    9,
5296                    16 + LANE2 as u32,
5297                    11,
5298                    12,
5299                    13,
5300                    14,
5301                    15
5302                ]
5303            ),
5304            11 => simd_shuffle!(
5305                a,
5306                b,
5307                [
5308                    0,
5309                    1,
5310                    2,
5311                    3,
5312                    4,
5313                    5,
5314                    6,
5315                    7,
5316                    8,
5317                    9,
5318                    10,
5319                    16 + LANE2 as u32,
5320                    12,
5321                    13,
5322                    14,
5323                    15
5324                ]
5325            ),
5326            12 => simd_shuffle!(
5327                a,
5328                b,
5329                [
5330                    0,
5331                    1,
5332                    2,
5333                    3,
5334                    4,
5335                    5,
5336                    6,
5337                    7,
5338                    8,
5339                    9,
5340                    10,
5341                    11,
5342                    16 + LANE2 as u32,
5343                    13,
5344                    14,
5345                    15
5346                ]
5347            ),
5348            13 => simd_shuffle!(
5349                a,
5350                b,
5351                [
5352                    0,
5353                    1,
5354                    2,
5355                    3,
5356                    4,
5357                    5,
5358                    6,
5359                    7,
5360                    8,
5361                    9,
5362                    10,
5363                    11,
5364                    12,
5365                    16 + LANE2 as u32,
5366                    14,
5367                    15
5368                ]
5369            ),
5370            14 => simd_shuffle!(
5371                a,
5372                b,
5373                [
5374                    0,
5375                    1,
5376                    2,
5377                    3,
5378                    4,
5379                    5,
5380                    6,
5381                    7,
5382                    8,
5383                    9,
5384                    10,
5385                    11,
5386                    12,
5387                    13,
5388                    16 + LANE2 as u32,
5389                    15
5390                ]
5391            ),
5392            15 => simd_shuffle!(
5393                a,
5394                b,
5395                [
5396                    0,
5397                    1,
5398                    2,
5399                    3,
5400                    4,
5401                    5,
5402                    6,
5403                    7,
5404                    8,
5405                    9,
5406                    10,
5407                    11,
5408                    12,
5409                    13,
5410                    14,
5411                    16 + LANE2 as u32
5412                ]
5413            ),
5414            _ => unreachable_unchecked(),
5415        }
5416    }
5417}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    // LANE1: destination lane in `a` (0..=7); LANE2: source lane in `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to eight lanes so both shuffle operands have the same lane
    // count; in the combined index space `a` covers 0..=7 and `b` 8..=15.
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // One arm per destination lane: keep `a` unchanged except lane LANE1,
        // which receives lane LANE2 of the widened `b`. Shuffle index arrays
        // must be compile-time constants, hence the explicit per-lane arms.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    // LANE1: destination lane in `a` (0..=3); LANE2: source lane in `b` (0..=1).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to four lanes so both shuffle operands have the same lane
    // count; in the combined index space `a` covers 0..=3 and `b` 4..=7.
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // One arm per destination lane: keep `a` unchanged except lane LANE1,
        // which receives lane LANE2 of the widened `b`. Shuffle index arrays
        // must be compile-time constants, hence the explicit per-lane arms.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
5470#[doc = "Insert vector element from another vector element"]
5471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
5472#[inline(always)]
5473#[target_feature(enable = "neon")]
5474#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5475#[rustc_legacy_const_generics(1, 3)]
5476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5477pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
5478    a: poly8x16_t,
5479    b: poly8x8_t,
5480) -> poly8x16_t {
5481    static_assert_uimm_bits!(LANE1, 4);
5482    static_assert_uimm_bits!(LANE2, 3);
5483    let b: poly8x16_t =
5484        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5485    unsafe {
5486        match LANE1 & 0b1111 {
5487            0 => simd_shuffle!(
5488                a,
5489                b,
5490                [
5491                    16 + LANE2 as u32,
5492                    1,
5493                    2,
5494                    3,
5495                    4,
5496                    5,
5497                    6,
5498                    7,
5499                    8,
5500                    9,
5501                    10,
5502                    11,
5503                    12,
5504                    13,
5505                    14,
5506                    15
5507                ]
5508            ),
5509            1 => simd_shuffle!(
5510                a,
5511                b,
5512                [
5513                    0,
5514                    16 + LANE2 as u32,
5515                    2,
5516                    3,
5517                    4,
5518                    5,
5519                    6,
5520                    7,
5521                    8,
5522                    9,
5523                    10,
5524                    11,
5525                    12,
5526                    13,
5527                    14,
5528                    15
5529                ]
5530            ),
5531            2 => simd_shuffle!(
5532                a,
5533                b,
5534                [
5535                    0,
5536                    1,
5537                    16 + LANE2 as u32,
5538                    3,
5539                    4,
5540                    5,
5541                    6,
5542                    7,
5543                    8,
5544                    9,
5545                    10,
5546                    11,
5547                    12,
5548                    13,
5549                    14,
5550                    15
5551                ]
5552            ),
5553            3 => simd_shuffle!(
5554                a,
5555                b,
5556                [
5557                    0,
5558                    1,
5559                    2,
5560                    16 + LANE2 as u32,
5561                    4,
5562                    5,
5563                    6,
5564                    7,
5565                    8,
5566                    9,
5567                    10,
5568                    11,
5569                    12,
5570                    13,
5571                    14,
5572                    15
5573                ]
5574            ),
5575            4 => simd_shuffle!(
5576                a,
5577                b,
5578                [
5579                    0,
5580                    1,
5581                    2,
5582                    3,
5583                    16 + LANE2 as u32,
5584                    5,
5585                    6,
5586                    7,
5587                    8,
5588                    9,
5589                    10,
5590                    11,
5591                    12,
5592                    13,
5593                    14,
5594                    15
5595                ]
5596            ),
5597            5 => simd_shuffle!(
5598                a,
5599                b,
5600                [
5601                    0,
5602                    1,
5603                    2,
5604                    3,
5605                    4,
5606                    16 + LANE2 as u32,
5607                    6,
5608                    7,
5609                    8,
5610                    9,
5611                    10,
5612                    11,
5613                    12,
5614                    13,
5615                    14,
5616                    15
5617                ]
5618            ),
5619            6 => simd_shuffle!(
5620                a,
5621                b,
5622                [
5623                    0,
5624                    1,
5625                    2,
5626                    3,
5627                    4,
5628                    5,
5629                    16 + LANE2 as u32,
5630                    7,
5631                    8,
5632                    9,
5633                    10,
5634                    11,
5635                    12,
5636                    13,
5637                    14,
5638                    15
5639                ]
5640            ),
5641            7 => simd_shuffle!(
5642                a,
5643                b,
5644                [
5645                    0,
5646                    1,
5647                    2,
5648                    3,
5649                    4,
5650                    5,
5651                    6,
5652                    16 + LANE2 as u32,
5653                    8,
5654                    9,
5655                    10,
5656                    11,
5657                    12,
5658                    13,
5659                    14,
5660                    15
5661                ]
5662            ),
5663            8 => simd_shuffle!(
5664                a,
5665                b,
5666                [
5667                    0,
5668                    1,
5669                    2,
5670                    3,
5671                    4,
5672                    5,
5673                    6,
5674                    7,
5675                    16 + LANE2 as u32,
5676                    9,
5677                    10,
5678                    11,
5679                    12,
5680                    13,
5681                    14,
5682                    15
5683                ]
5684            ),
5685            9 => simd_shuffle!(
5686                a,
5687                b,
5688                [
5689                    0,
5690                    1,
5691                    2,
5692                    3,
5693                    4,
5694                    5,
5695                    6,
5696                    7,
5697                    8,
5698                    16 + LANE2 as u32,
5699                    10,
5700                    11,
5701                    12,
5702                    13,
5703                    14,
5704                    15
5705                ]
5706            ),
5707            10 => simd_shuffle!(
5708                a,
5709                b,
5710                [
5711                    0,
5712                    1,
5713                    2,
5714                    3,
5715                    4,
5716                    5,
5717                    6,
5718                    7,
5719                    8,
5720                    9,
5721                    16 + LANE2 as u32,
5722                    11,
5723                    12,
5724                    13,
5725                    14,
5726                    15
5727                ]
5728            ),
5729            11 => simd_shuffle!(
5730                a,
5731                b,
5732                [
5733                    0,
5734                    1,
5735                    2,
5736                    3,
5737                    4,
5738                    5,
5739                    6,
5740                    7,
5741                    8,
5742                    9,
5743                    10,
5744                    16 + LANE2 as u32,
5745                    12,
5746                    13,
5747                    14,
5748                    15
5749                ]
5750            ),
5751            12 => simd_shuffle!(
5752                a,
5753                b,
5754                [
5755                    0,
5756                    1,
5757                    2,
5758                    3,
5759                    4,
5760                    5,
5761                    6,
5762                    7,
5763                    8,
5764                    9,
5765                    10,
5766                    11,
5767                    16 + LANE2 as u32,
5768                    13,
5769                    14,
5770                    15
5771                ]
5772            ),
5773            13 => simd_shuffle!(
5774                a,
5775                b,
5776                [
5777                    0,
5778                    1,
5779                    2,
5780                    3,
5781                    4,
5782                    5,
5783                    6,
5784                    7,
5785                    8,
5786                    9,
5787                    10,
5788                    11,
5789                    12,
5790                    16 + LANE2 as u32,
5791                    14,
5792                    15
5793                ]
5794            ),
5795            14 => simd_shuffle!(
5796                a,
5797                b,
5798                [
5799                    0,
5800                    1,
5801                    2,
5802                    3,
5803                    4,
5804                    5,
5805                    6,
5806                    7,
5807                    8,
5808                    9,
5809                    10,
5810                    11,
5811                    12,
5812                    13,
5813                    16 + LANE2 as u32,
5814                    15
5815                ]
5816            ),
5817            15 => simd_shuffle!(
5818                a,
5819                b,
5820                [
5821                    0,
5822                    1,
5823                    2,
5824                    3,
5825                    4,
5826                    5,
5827                    6,
5828                    7,
5829                    8,
5830                    9,
5831                    10,
5832                    11,
5833                    12,
5834                    13,
5835                    14,
5836                    16 + LANE2 as u32
5837                ]
5838            ),
5839            _ => unreachable_unchecked(),
5840        }
5841    }
5842}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    // LANE1: destination lane in `a` (0..=7); LANE2: source lane in `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to eight lanes so both shuffle operands have the same lane
    // count; in the combined index space `a` covers 0..=7 and `b` 8..=15.
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // One arm per destination lane: keep `a` unchanged except lane LANE1,
        // which receives lane LANE2 of the widened `b`. Shuffle index arrays
        // must be compile-time constants, hence the explicit per-lane arms.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to three bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    // LANE1: destination lane in `a` (0..=3); LANE2: source lane in `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // Both operands are already four lanes wide, so no widening shuffle is
    // needed; in the combined index space `a` covers 0..=3 and `b` 4..=7.
    unsafe {
        // One arm per destination lane: keep `a` unchanged except lane LANE1,
        // which receives lane LANE2 of `b`. Shuffle index arrays must be
        // compile-time constants, hence the explicit per-lane arms.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to two bits above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    // LANE1: destination lane in `a` (0 or 1); LANE2: source lane in `b` (0 or 1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // Both operands are already two lanes wide, so no widening shuffle is
    // needed; in the combined index space `a` covers 0..=1 and `b` 2..=3.
    unsafe {
        // One arm per destination lane: keep `a` unchanged except lane LANE1,
        // which receives lane LANE2 of `b`. Shuffle index arrays must be
        // compile-time constants, hence the explicit per-lane arms.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // Unreachable: LANE1 is masked to one bit above.
            _ => unreachable_unchecked(),
        }
    }
}
5915#[doc = "Insert vector element from another vector element"]
5916#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
5917#[inline(always)]
5918#[target_feature(enable = "neon")]
5919#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5920#[rustc_legacy_const_generics(1, 3)]
5921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5922pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
5923    a: int8x16_t,
5924    b: int8x16_t,
5925) -> int8x16_t {
5926    static_assert_uimm_bits!(LANE1, 4);
5927    static_assert_uimm_bits!(LANE2, 4);
5928    unsafe {
5929        match LANE1 & 0b1111 {
5930            0 => simd_shuffle!(
5931                a,
5932                b,
5933                [
5934                    16 + LANE2 as u32,
5935                    1,
5936                    2,
5937                    3,
5938                    4,
5939                    5,
5940                    6,
5941                    7,
5942                    8,
5943                    9,
5944                    10,
5945                    11,
5946                    12,
5947                    13,
5948                    14,
5949                    15
5950                ]
5951            ),
5952            1 => simd_shuffle!(
5953                a,
5954                b,
5955                [
5956                    0,
5957                    16 + LANE2 as u32,
5958                    2,
5959                    3,
5960                    4,
5961                    5,
5962                    6,
5963                    7,
5964                    8,
5965                    9,
5966                    10,
5967                    11,
5968                    12,
5969                    13,
5970                    14,
5971                    15
5972                ]
5973            ),
5974            2 => simd_shuffle!(
5975                a,
5976                b,
5977                [
5978                    0,
5979                    1,
5980                    16 + LANE2 as u32,
5981                    3,
5982                    4,
5983                    5,
5984                    6,
5985                    7,
5986                    8,
5987                    9,
5988                    10,
5989                    11,
5990                    12,
5991                    13,
5992                    14,
5993                    15
5994                ]
5995            ),
5996            3 => simd_shuffle!(
5997                a,
5998                b,
5999                [
6000                    0,
6001                    1,
6002                    2,
6003                    16 + LANE2 as u32,
6004                    4,
6005                    5,
6006                    6,
6007                    7,
6008                    8,
6009                    9,
6010                    10,
6011                    11,
6012                    12,
6013                    13,
6014                    14,
6015                    15
6016                ]
6017            ),
6018            4 => simd_shuffle!(
6019                a,
6020                b,
6021                [
6022                    0,
6023                    1,
6024                    2,
6025                    3,
6026                    16 + LANE2 as u32,
6027                    5,
6028                    6,
6029                    7,
6030                    8,
6031                    9,
6032                    10,
6033                    11,
6034                    12,
6035                    13,
6036                    14,
6037                    15
6038                ]
6039            ),
6040            5 => simd_shuffle!(
6041                a,
6042                b,
6043                [
6044                    0,
6045                    1,
6046                    2,
6047                    3,
6048                    4,
6049                    16 + LANE2 as u32,
6050                    6,
6051                    7,
6052                    8,
6053                    9,
6054                    10,
6055                    11,
6056                    12,
6057                    13,
6058                    14,
6059                    15
6060                ]
6061            ),
6062            6 => simd_shuffle!(
6063                a,
6064                b,
6065                [
6066                    0,
6067                    1,
6068                    2,
6069                    3,
6070                    4,
6071                    5,
6072                    16 + LANE2 as u32,
6073                    7,
6074                    8,
6075                    9,
6076                    10,
6077                    11,
6078                    12,
6079                    13,
6080                    14,
6081                    15
6082                ]
6083            ),
6084            7 => simd_shuffle!(
6085                a,
6086                b,
6087                [
6088                    0,
6089                    1,
6090                    2,
6091                    3,
6092                    4,
6093                    5,
6094                    6,
6095                    16 + LANE2 as u32,
6096                    8,
6097                    9,
6098                    10,
6099                    11,
6100                    12,
6101                    13,
6102                    14,
6103                    15
6104                ]
6105            ),
6106            8 => simd_shuffle!(
6107                a,
6108                b,
6109                [
6110                    0,
6111                    1,
6112                    2,
6113                    3,
6114                    4,
6115                    5,
6116                    6,
6117                    7,
6118                    16 + LANE2 as u32,
6119                    9,
6120                    10,
6121                    11,
6122                    12,
6123                    13,
6124                    14,
6125                    15
6126                ]
6127            ),
6128            9 => simd_shuffle!(
6129                a,
6130                b,
6131                [
6132                    0,
6133                    1,
6134                    2,
6135                    3,
6136                    4,
6137                    5,
6138                    6,
6139                    7,
6140                    8,
6141                    16 + LANE2 as u32,
6142                    10,
6143                    11,
6144                    12,
6145                    13,
6146                    14,
6147                    15
6148                ]
6149            ),
6150            10 => simd_shuffle!(
6151                a,
6152                b,
6153                [
6154                    0,
6155                    1,
6156                    2,
6157                    3,
6158                    4,
6159                    5,
6160                    6,
6161                    7,
6162                    8,
6163                    9,
6164                    16 + LANE2 as u32,
6165                    11,
6166                    12,
6167                    13,
6168                    14,
6169                    15
6170                ]
6171            ),
6172            11 => simd_shuffle!(
6173                a,
6174                b,
6175                [
6176                    0,
6177                    1,
6178                    2,
6179                    3,
6180                    4,
6181                    5,
6182                    6,
6183                    7,
6184                    8,
6185                    9,
6186                    10,
6187                    16 + LANE2 as u32,
6188                    12,
6189                    13,
6190                    14,
6191                    15
6192                ]
6193            ),
6194            12 => simd_shuffle!(
6195                a,
6196                b,
6197                [
6198                    0,
6199                    1,
6200                    2,
6201                    3,
6202                    4,
6203                    5,
6204                    6,
6205                    7,
6206                    8,
6207                    9,
6208                    10,
6209                    11,
6210                    16 + LANE2 as u32,
6211                    13,
6212                    14,
6213                    15
6214                ]
6215            ),
6216            13 => simd_shuffle!(
6217                a,
6218                b,
6219                [
6220                    0,
6221                    1,
6222                    2,
6223                    3,
6224                    4,
6225                    5,
6226                    6,
6227                    7,
6228                    8,
6229                    9,
6230                    10,
6231                    11,
6232                    12,
6233                    16 + LANE2 as u32,
6234                    14,
6235                    15
6236                ]
6237            ),
6238            14 => simd_shuffle!(
6239                a,
6240                b,
6241                [
6242                    0,
6243                    1,
6244                    2,
6245                    3,
6246                    4,
6247                    5,
6248                    6,
6249                    7,
6250                    8,
6251                    9,
6252                    10,
6253                    11,
6254                    12,
6255                    13,
6256                    16 + LANE2 as u32,
6257                    15
6258                ]
6259            ),
6260            15 => simd_shuffle!(
6261                a,
6262                b,
6263                [
6264                    0,
6265                    1,
6266                    2,
6267                    3,
6268                    4,
6269                    5,
6270                    6,
6271                    7,
6272                    8,
6273                    9,
6274                    10,
6275                    11,
6276                    12,
6277                    13,
6278                    14,
6279                    16 + LANE2 as u32
6280                ]
6281            ),
6282            _ => unreachable_unchecked(),
6283        }
6284    }
6285}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    // An int16x8_t has 8 lanes, so both lane indices must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // simd_shuffle! indexes the 16-lane concatenation [a, b]: indices
        // 0..=7 select from `a`, 8..=15 select from `b`. Each arm is the
        // identity permutation of `a` except at position LANE1, which
        // receives lane LANE2 of `b` (index `8 + LANE2`). The index array
        // must be a compile-time constant, hence one arm per LANE1 value.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 & 0b111 is always 0..=7, so this arm cannot be reached.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    // An int32x4_t has 4 lanes, so both lane indices must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // simd_shuffle! indexes the 8-lane concatenation [a, b]: indices
        // 0..=3 select from `a`, 4..=7 select from `b`. Each arm copies `a`
        // unchanged except at LANE1, which receives lane LANE2 of `b`
        // (index `4 + LANE2`). The index array must be a compile-time
        // constant, hence one arm per LANE1 value.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 & 0b11 is always 0..=3, so this arm cannot be reached.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    // An int64x2_t has 2 lanes, so both lane indices must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // simd_shuffle! indexes the 4-lane concatenation [a, b]: indices
        // 0..=1 select from `a`, 2..=3 select from `b`. Each arm copies `a`
        // unchanged except at LANE1, which receives lane LANE2 of `b`
        // (index `2 + LANE2`). The index array must be a compile-time
        // constant, hence one arm per LANE1 value.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 & 0b1 is always 0 or 1, so this arm cannot be reached.
            _ => unreachable_unchecked(),
        }
    }
}
6357#[doc = "Insert vector element from another vector element"]
6358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6359#[inline(always)]
6360#[target_feature(enable = "neon")]
6361#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6362#[rustc_legacy_const_generics(1, 3)]
6363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6364pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6365    a: uint8x16_t,
6366    b: uint8x16_t,
6367) -> uint8x16_t {
6368    static_assert_uimm_bits!(LANE1, 4);
6369    static_assert_uimm_bits!(LANE2, 4);
6370    unsafe {
6371        match LANE1 & 0b1111 {
6372            0 => simd_shuffle!(
6373                a,
6374                b,
6375                [
6376                    16 + LANE2 as u32,
6377                    1,
6378                    2,
6379                    3,
6380                    4,
6381                    5,
6382                    6,
6383                    7,
6384                    8,
6385                    9,
6386                    10,
6387                    11,
6388                    12,
6389                    13,
6390                    14,
6391                    15
6392                ]
6393            ),
6394            1 => simd_shuffle!(
6395                a,
6396                b,
6397                [
6398                    0,
6399                    16 + LANE2 as u32,
6400                    2,
6401                    3,
6402                    4,
6403                    5,
6404                    6,
6405                    7,
6406                    8,
6407                    9,
6408                    10,
6409                    11,
6410                    12,
6411                    13,
6412                    14,
6413                    15
6414                ]
6415            ),
6416            2 => simd_shuffle!(
6417                a,
6418                b,
6419                [
6420                    0,
6421                    1,
6422                    16 + LANE2 as u32,
6423                    3,
6424                    4,
6425                    5,
6426                    6,
6427                    7,
6428                    8,
6429                    9,
6430                    10,
6431                    11,
6432                    12,
6433                    13,
6434                    14,
6435                    15
6436                ]
6437            ),
6438            3 => simd_shuffle!(
6439                a,
6440                b,
6441                [
6442                    0,
6443                    1,
6444                    2,
6445                    16 + LANE2 as u32,
6446                    4,
6447                    5,
6448                    6,
6449                    7,
6450                    8,
6451                    9,
6452                    10,
6453                    11,
6454                    12,
6455                    13,
6456                    14,
6457                    15
6458                ]
6459            ),
6460            4 => simd_shuffle!(
6461                a,
6462                b,
6463                [
6464                    0,
6465                    1,
6466                    2,
6467                    3,
6468                    16 + LANE2 as u32,
6469                    5,
6470                    6,
6471                    7,
6472                    8,
6473                    9,
6474                    10,
6475                    11,
6476                    12,
6477                    13,
6478                    14,
6479                    15
6480                ]
6481            ),
6482            5 => simd_shuffle!(
6483                a,
6484                b,
6485                [
6486                    0,
6487                    1,
6488                    2,
6489                    3,
6490                    4,
6491                    16 + LANE2 as u32,
6492                    6,
6493                    7,
6494                    8,
6495                    9,
6496                    10,
6497                    11,
6498                    12,
6499                    13,
6500                    14,
6501                    15
6502                ]
6503            ),
6504            6 => simd_shuffle!(
6505                a,
6506                b,
6507                [
6508                    0,
6509                    1,
6510                    2,
6511                    3,
6512                    4,
6513                    5,
6514                    16 + LANE2 as u32,
6515                    7,
6516                    8,
6517                    9,
6518                    10,
6519                    11,
6520                    12,
6521                    13,
6522                    14,
6523                    15
6524                ]
6525            ),
6526            7 => simd_shuffle!(
6527                a,
6528                b,
6529                [
6530                    0,
6531                    1,
6532                    2,
6533                    3,
6534                    4,
6535                    5,
6536                    6,
6537                    16 + LANE2 as u32,
6538                    8,
6539                    9,
6540                    10,
6541                    11,
6542                    12,
6543                    13,
6544                    14,
6545                    15
6546                ]
6547            ),
6548            8 => simd_shuffle!(
6549                a,
6550                b,
6551                [
6552                    0,
6553                    1,
6554                    2,
6555                    3,
6556                    4,
6557                    5,
6558                    6,
6559                    7,
6560                    16 + LANE2 as u32,
6561                    9,
6562                    10,
6563                    11,
6564                    12,
6565                    13,
6566                    14,
6567                    15
6568                ]
6569            ),
6570            9 => simd_shuffle!(
6571                a,
6572                b,
6573                [
6574                    0,
6575                    1,
6576                    2,
6577                    3,
6578                    4,
6579                    5,
6580                    6,
6581                    7,
6582                    8,
6583                    16 + LANE2 as u32,
6584                    10,
6585                    11,
6586                    12,
6587                    13,
6588                    14,
6589                    15
6590                ]
6591            ),
6592            10 => simd_shuffle!(
6593                a,
6594                b,
6595                [
6596                    0,
6597                    1,
6598                    2,
6599                    3,
6600                    4,
6601                    5,
6602                    6,
6603                    7,
6604                    8,
6605                    9,
6606                    16 + LANE2 as u32,
6607                    11,
6608                    12,
6609                    13,
6610                    14,
6611                    15
6612                ]
6613            ),
6614            11 => simd_shuffle!(
6615                a,
6616                b,
6617                [
6618                    0,
6619                    1,
6620                    2,
6621                    3,
6622                    4,
6623                    5,
6624                    6,
6625                    7,
6626                    8,
6627                    9,
6628                    10,
6629                    16 + LANE2 as u32,
6630                    12,
6631                    13,
6632                    14,
6633                    15
6634                ]
6635            ),
6636            12 => simd_shuffle!(
6637                a,
6638                b,
6639                [
6640                    0,
6641                    1,
6642                    2,
6643                    3,
6644                    4,
6645                    5,
6646                    6,
6647                    7,
6648                    8,
6649                    9,
6650                    10,
6651                    11,
6652                    16 + LANE2 as u32,
6653                    13,
6654                    14,
6655                    15
6656                ]
6657            ),
6658            13 => simd_shuffle!(
6659                a,
6660                b,
6661                [
6662                    0,
6663                    1,
6664                    2,
6665                    3,
6666                    4,
6667                    5,
6668                    6,
6669                    7,
6670                    8,
6671                    9,
6672                    10,
6673                    11,
6674                    12,
6675                    16 + LANE2 as u32,
6676                    14,
6677                    15
6678                ]
6679            ),
6680            14 => simd_shuffle!(
6681                a,
6682                b,
6683                [
6684                    0,
6685                    1,
6686                    2,
6687                    3,
6688                    4,
6689                    5,
6690                    6,
6691                    7,
6692                    8,
6693                    9,
6694                    10,
6695                    11,
6696                    12,
6697                    13,
6698                    16 + LANE2 as u32,
6699                    15
6700                ]
6701            ),
6702            15 => simd_shuffle!(
6703                a,
6704                b,
6705                [
6706                    0,
6707                    1,
6708                    2,
6709                    3,
6710                    4,
6711                    5,
6712                    6,
6713                    7,
6714                    8,
6715                    9,
6716                    10,
6717                    11,
6718                    12,
6719                    13,
6720                    14,
6721                    16 + LANE2 as u32
6722                ]
6723            ),
6724            _ => unreachable_unchecked(),
6725        }
6726    }
6727}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    // A uint16x8_t has 8 lanes, so both lane indices must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // simd_shuffle! indexes the 16-lane concatenation [a, b]: indices
        // 0..=7 select from `a`, 8..=15 select from `b`. Each arm is the
        // identity permutation of `a` except at position LANE1, which
        // receives lane LANE2 of `b` (index `8 + LANE2`). The index array
        // must be a compile-time constant, hence one arm per LANE1 value.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 & 0b111 is always 0..=7, so this arm cannot be reached.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    // A uint32x4_t has 4 lanes, so both lane indices must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // simd_shuffle! indexes the 8-lane concatenation [a, b]: indices
        // 0..=3 select from `a`, 4..=7 select from `b`. Each arm copies `a`
        // unchanged except at LANE1, which receives lane LANE2 of `b`
        // (index `4 + LANE2`). The index array must be a compile-time
        // constant, hence one arm per LANE1 value.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 & 0b11 is always 0..=3, so this arm cannot be reached.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    // A uint64x2_t has 2 lanes, so both lane indices must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // simd_shuffle! indexes the 4-lane concatenation [a, b]: indices
        // 0..=1 select from `a`, 2..=3 select from `b`. Each arm copies `a`
        // unchanged except at LANE1, which receives lane LANE2 of `b`
        // (index `2 + LANE2`). The index array must be a compile-time
        // constant, hence one arm per LANE1 value.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 & 0b1 is always 0 or 1, so this arm cannot be reached.
            _ => unreachable_unchecked(),
        }
    }
}
6799#[doc = "Insert vector element from another vector element"]
6800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6801#[inline(always)]
6802#[target_feature(enable = "neon")]
6803#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6804#[rustc_legacy_const_generics(1, 3)]
6805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6806pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6807    a: poly8x16_t,
6808    b: poly8x16_t,
6809) -> poly8x16_t {
6810    static_assert_uimm_bits!(LANE1, 4);
6811    static_assert_uimm_bits!(LANE2, 4);
6812    unsafe {
6813        match LANE1 & 0b1111 {
6814            0 => simd_shuffle!(
6815                a,
6816                b,
6817                [
6818                    16 + LANE2 as u32,
6819                    1,
6820                    2,
6821                    3,
6822                    4,
6823                    5,
6824                    6,
6825                    7,
6826                    8,
6827                    9,
6828                    10,
6829                    11,
6830                    12,
6831                    13,
6832                    14,
6833                    15
6834                ]
6835            ),
6836            1 => simd_shuffle!(
6837                a,
6838                b,
6839                [
6840                    0,
6841                    16 + LANE2 as u32,
6842                    2,
6843                    3,
6844                    4,
6845                    5,
6846                    6,
6847                    7,
6848                    8,
6849                    9,
6850                    10,
6851                    11,
6852                    12,
6853                    13,
6854                    14,
6855                    15
6856                ]
6857            ),
6858            2 => simd_shuffle!(
6859                a,
6860                b,
6861                [
6862                    0,
6863                    1,
6864                    16 + LANE2 as u32,
6865                    3,
6866                    4,
6867                    5,
6868                    6,
6869                    7,
6870                    8,
6871                    9,
6872                    10,
6873                    11,
6874                    12,
6875                    13,
6876                    14,
6877                    15
6878                ]
6879            ),
6880            3 => simd_shuffle!(
6881                a,
6882                b,
6883                [
6884                    0,
6885                    1,
6886                    2,
6887                    16 + LANE2 as u32,
6888                    4,
6889                    5,
6890                    6,
6891                    7,
6892                    8,
6893                    9,
6894                    10,
6895                    11,
6896                    12,
6897                    13,
6898                    14,
6899                    15
6900                ]
6901            ),
6902            4 => simd_shuffle!(
6903                a,
6904                b,
6905                [
6906                    0,
6907                    1,
6908                    2,
6909                    3,
6910                    16 + LANE2 as u32,
6911                    5,
6912                    6,
6913                    7,
6914                    8,
6915                    9,
6916                    10,
6917                    11,
6918                    12,
6919                    13,
6920                    14,
6921                    15
6922                ]
6923            ),
6924            5 => simd_shuffle!(
6925                a,
6926                b,
6927                [
6928                    0,
6929                    1,
6930                    2,
6931                    3,
6932                    4,
6933                    16 + LANE2 as u32,
6934                    6,
6935                    7,
6936                    8,
6937                    9,
6938                    10,
6939                    11,
6940                    12,
6941                    13,
6942                    14,
6943                    15
6944                ]
6945            ),
6946            6 => simd_shuffle!(
6947                a,
6948                b,
6949                [
6950                    0,
6951                    1,
6952                    2,
6953                    3,
6954                    4,
6955                    5,
6956                    16 + LANE2 as u32,
6957                    7,
6958                    8,
6959                    9,
6960                    10,
6961                    11,
6962                    12,
6963                    13,
6964                    14,
6965                    15
6966                ]
6967            ),
6968            7 => simd_shuffle!(
6969                a,
6970                b,
6971                [
6972                    0,
6973                    1,
6974                    2,
6975                    3,
6976                    4,
6977                    5,
6978                    6,
6979                    16 + LANE2 as u32,
6980                    8,
6981                    9,
6982                    10,
6983                    11,
6984                    12,
6985                    13,
6986                    14,
6987                    15
6988                ]
6989            ),
6990            8 => simd_shuffle!(
6991                a,
6992                b,
6993                [
6994                    0,
6995                    1,
6996                    2,
6997                    3,
6998                    4,
6999                    5,
7000                    6,
7001                    7,
7002                    16 + LANE2 as u32,
7003                    9,
7004                    10,
7005                    11,
7006                    12,
7007                    13,
7008                    14,
7009                    15
7010                ]
7011            ),
7012            9 => simd_shuffle!(
7013                a,
7014                b,
7015                [
7016                    0,
7017                    1,
7018                    2,
7019                    3,
7020                    4,
7021                    5,
7022                    6,
7023                    7,
7024                    8,
7025                    16 + LANE2 as u32,
7026                    10,
7027                    11,
7028                    12,
7029                    13,
7030                    14,
7031                    15
7032                ]
7033            ),
7034            10 => simd_shuffle!(
7035                a,
7036                b,
7037                [
7038                    0,
7039                    1,
7040                    2,
7041                    3,
7042                    4,
7043                    5,
7044                    6,
7045                    7,
7046                    8,
7047                    9,
7048                    16 + LANE2 as u32,
7049                    11,
7050                    12,
7051                    13,
7052                    14,
7053                    15
7054                ]
7055            ),
7056            11 => simd_shuffle!(
7057                a,
7058                b,
7059                [
7060                    0,
7061                    1,
7062                    2,
7063                    3,
7064                    4,
7065                    5,
7066                    6,
7067                    7,
7068                    8,
7069                    9,
7070                    10,
7071                    16 + LANE2 as u32,
7072                    12,
7073                    13,
7074                    14,
7075                    15
7076                ]
7077            ),
7078            12 => simd_shuffle!(
7079                a,
7080                b,
7081                [
7082                    0,
7083                    1,
7084                    2,
7085                    3,
7086                    4,
7087                    5,
7088                    6,
7089                    7,
7090                    8,
7091                    9,
7092                    10,
7093                    11,
7094                    16 + LANE2 as u32,
7095                    13,
7096                    14,
7097                    15
7098                ]
7099            ),
7100            13 => simd_shuffle!(
7101                a,
7102                b,
7103                [
7104                    0,
7105                    1,
7106                    2,
7107                    3,
7108                    4,
7109                    5,
7110                    6,
7111                    7,
7112                    8,
7113                    9,
7114                    10,
7115                    11,
7116                    12,
7117                    16 + LANE2 as u32,
7118                    14,
7119                    15
7120                ]
7121            ),
7122            14 => simd_shuffle!(
7123                a,
7124                b,
7125                [
7126                    0,
7127                    1,
7128                    2,
7129                    3,
7130                    4,
7131                    5,
7132                    6,
7133                    7,
7134                    8,
7135                    9,
7136                    10,
7137                    11,
7138                    12,
7139                    13,
7140                    16 + LANE2 as u32,
7141                    15
7142                ]
7143            ),
7144            15 => simd_shuffle!(
7145                a,
7146                b,
7147                [
7148                    0,
7149                    1,
7150                    2,
7151                    3,
7152                    4,
7153                    5,
7154                    6,
7155                    7,
7156                    8,
7157                    9,
7158                    10,
7159                    11,
7160                    12,
7161                    13,
7162                    14,
7163                    16 + LANE2 as u32
7164                ]
7165            ),
7166            _ => unreachable_unchecked(),
7167        }
7168    }
7169}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    // Both lane indices must fit in 3 bits (0..=7) for these 8-lane vectors.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Dispatch on the destination lane: each arm's shuffle keeps every lane
        // of `a` except lane LANE1, which is replaced by lane LANE2 of `b`.
        // In the concatenated shuffle input, `b`'s lanes are numbered 8..=15,
        // hence the `8 + LANE2` index.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    // Both lane indices must fit in 1 bit (0..=1) for these 2-lane vectors.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Replace lane LANE1 of `a` with lane LANE2 of `b`; `b`'s lanes are
        // numbered 2..=3 in the concatenated shuffle input, hence `2 + LANE2`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    // Pure bit-reinterpretation: the 64 bits of `a` become the single f64 lane.
    // SAFETY: u64 and float64x1_t have the same size, so the transmute is valid.
    unsafe { transmute(a) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // Lane-wise f64 -> f32 narrowing via the portable SIMD cast;
    // lowers to `fcvtn` (see assert_instr above).
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // Lane-wise f32 -> f64 widening via the portable SIMD cast;
    // lowers to `fcvtl` (see assert_instr above).
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // Lane-wise signed i64 -> f64 conversion; lowers to `scvtf`.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Lane-wise signed i64 -> f64 conversion (2 lanes); lowers to `scvtf`.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // Lane-wise unsigned u64 -> f64 conversion; lowers to `ucvtf`.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Lane-wise unsigned u64 -> f64 conversion (2 lanes); lowers to `ucvtf`.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    // Narrow `b` from f32 to f16 and place it in the high half of the result,
    // keeping `a` as the low half; lowers to `fcvtn2`.
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    // Widen the high four f16 lanes of `a` to f32; lowers to `fcvtl2`.
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Narrow `b`'s two f64 lanes to f32 (simd_cast), then concatenate them
    // after `a`'s two lanes via the shuffle; lowers to `fcvtn2`.
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe {
        // Extract the high two f32 lanes (indices 2, 3) ...
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        // ... and widen each to f64; lowers to `fcvtl2`.
        simd_cast(b)
    }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // N (the fixed-point position) must lie in 1..=64 for 64-bit elements.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    // Uses LLVM's saturating float-to-signed conversion, matching `fcvtzs`
    // semantics (out-of-range values clamp rather than wrap).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Uses LLVM's saturating float-to-signed conversion, matching `fcvtzs`
    // semantics (out-of-range values clamp rather than wrap).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Uses LLVM's saturating float-to-unsigned conversion, matching `fcvtzu`
    // semantics (out-of-range values clamp rather than wrap).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Uses LLVM's saturating float-to-unsigned conversion, matching `fcvtzu`
    // semantics (out-of-range values clamp rather than wrap).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the backing LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Converts via the 32-bit scalar intrinsic, then narrows with `as i16`.
    // NOTE(review): `as i16` truncates; f16 inputs whose fcvtas result exceeds
    // the i16 range (f16 can represent up to 65504) would wrap — confirm this
    // matches the intended 16-bit saturation semantics of the Arm intrinsic.
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    // Declaration of the backing scalar LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    // Declaration of the backing scalar LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Converts via the 32-bit scalar intrinsic, then narrows with `as u16`.
    // NOTE(review): `as u16` truncates; f16 inputs whose fcvtau result exceeds
    // the u16 range would wrap — confirm this matches the intended 16-bit
    // saturation semantics of the Arm intrinsic.
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    // Declaration of the backing scalar LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    // Declaration of the backing scalar LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    // SAFETY: calling the extern intrinsic; the required CPU features are
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    // Declaration of the backing scalar LLVM intrinsic, resolved via link_name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    // SAFETY: calling the extern intrinsic; the required CPU feature is
    // guaranteed by the #[target_feature] attribute on this function.
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtad_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // A plain numeric cast suffices: it compiles to a single SCVTF,
    // as asserted by `assert_instr` above.
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // A plain numeric cast suffices: it compiles to a single SCVTF,
    // as asserted by `assert_instr` above.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // A plain numeric cast suffices: it compiles to SCVTF,
    // as asserted by `assert_instr` above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // A plain numeric cast suffices: it compiles to SCVTF,
    // as asserted by `assert_instr` above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // A plain numeric cast suffices: it compiles to SCVTF,
    // as asserted by `assert_instr` above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // A plain numeric cast suffices: it compiles to UCVTF,
    // as asserted by `assert_instr` above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // A plain numeric cast suffices: it compiles to UCVTF,
    // as asserted by `assert_instr` above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // A plain numeric cast suffices: it compiles to UCVTF,
    // as asserted by `assert_instr` above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // No dedicated i16 intrinsic: sign-extend and reuse the i32 form.
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM fixed-point-to-float intrinsic (SCVTF #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM fixed-point-to-float intrinsic (SCVTF #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // No dedicated u16 intrinsic: zero-extend and reuse the u32 form.
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM fixed-point-to-float intrinsic (UCVTF #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM fixed-point-to-float intrinsic (UCVTF #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // No dedicated i16 intrinsic: convert via the i32 form and truncate.
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM float-to-fixed-point intrinsic (FCVTZS #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM float-to-fixed-point intrinsic (FCVTZS #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // No dedicated u16 intrinsic: convert via the u32 form and truncate.
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM float-to-fixed-point intrinsic (FCVTZU #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    // N is the number of fraction bits; only 1..=16 is valid.
    static_assert!(N >= 1 && N <= 16);
    // Direct binding to the LLVM float-to-fixed-point intrinsic (FCVTZU #N).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    // SAFETY: value arguments only; N was range-checked by static_assert.
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Rust's saturating float-to-int `as` cast compiles to FCVTZS,
    // as asserted by `assert_instr` above.
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Rust's saturating float-to-int `as` cast compiles to FCVTZS,
    // as asserted by `assert_instr` above.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Rust's saturating float-to-int `as` cast compiles to FCVTZS,
    // as asserted by `assert_instr` above.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Rust's saturating float-to-int `as` cast compiles to FCVTZU,
    // as asserted by `assert_instr` above.
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Rust's saturating float-to-int `as` cast compiles to FCVTZU,
    // as asserted by `assert_instr` above.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Rust's saturating float-to-int `as` cast compiles to FCVTZU,
    // as asserted by `assert_instr` above.
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Direct binding to the vector LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic only reads its vector value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // No dedicated i16 intrinsic: convert via the i32 form and truncate.
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // No dedicated u16 intrinsic: convert via the u32 form and truncate.
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target features are enabled above.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    // Direct binding to the LLVM intrinsic that lowers to FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the intrinsic only reads its scalar value argument (no
    // pointers/memory); the required target feature is enabled above.
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    // Implemented via the 32-bit conversion; the result is then truncated to 16 bits.
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    // Implemented via the 32-bit conversion; the result is then truncated to 16 bits.
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    // Implemented via the 32-bit conversion; the result is then truncated to 16 bits.
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    // Implemented via the 32-bit conversion; the result is then truncated to 16 bits.
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    // Declares the backing LLVM intrinsic; the symbol is resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: `#[target_feature]` above guarantees the required CPU features at every call site.
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    // Binds the raw LLVM intrinsic so this lowers to a single FCVTPS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    // Binds the raw LLVM intrinsic so this lowers to a single FCVTPS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    // Binds the raw LLVM intrinsic so this lowers to a single FCVTPU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    // Binds the raw LLVM intrinsic so this lowers to a single FCVTPU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtpd_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    // Rust's unsigned-to-float `as` cast compiles to UCVTF here.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    // Rust's unsigned-to-float `as` cast compiles to UCVTF here.
    a as f64
}
9278#[doc = "Fixed-point convert to floating-point"]
9279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9280#[inline(always)]
9281#[target_feature(enable = "neon")]
9282#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9283#[rustc_legacy_const_generics(1)]
9284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9285pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9286    static_assert!(N >= 1 && N <= 64);
9287    unsafe extern "unadjusted" {
9288        #[cfg_attr(
9289            any(target_arch = "aarch64", target_arch = "arm64ec"),
9290            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9291        )]
9292        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9293    }
9294    unsafe { _vcvts_n_f32_s32(a, N) }
9295}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    // N is the fractional-bit count (1..=64 for a 64-bit operand); SCVTF #N.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    // N is the fractional-bit count (1..=32 for a 32-bit operand); UCVTF #N.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    // N is the fractional-bit count (1..=64 for a 64-bit operand); UCVTF #N.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    // N is the fractional-bit count (1..=32 for a 32-bit result); FCVTZS #N.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    // N is the fractional-bit count (1..=64 for a 64-bit result); FCVTZS #N.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    // N is the fractional-bit count (1..=32 for a 32-bit result); FCVTZU #N.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    // N is the fractional-bit count (1..=64 for a 64-bit result); FCVTZU #N.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_s32_f32(a: f32) -> i32 {
    // The summary previously read "Fixed-point convert to floating-point",
    // which is the reverse direction; this converts f32 -> i32 via FCVTZS.
    // Rust's saturating float-to-int `as` cast matches FCVTZS semantics.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_s64_f64(a: f64) -> i64 {
    // Converts f64 -> i64 via FCVTZS (the old summary described the reverse
    // direction). Rust's saturating `as` cast matches FCVTZS semantics.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_u32_f32(a: f32) -> u32 {
    // Converts f32 -> u32 via FCVTZU (the old summary described the reverse
    // direction). Rust's saturating `as` cast matches FCVTZU semantics.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_u64_f64(a: f64) -> u64 {
    // Converts f64 -> u64 via FCVTZU (the old summary described the reverse
    // direction). Rust's saturating `as` cast matches FCVTZU semantics.
    a as u64
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    // Binds the raw LLVM intrinsic so this lowers to a single FCVTXN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Narrows `b` (2 lanes) and concatenates it after `a` (2 lanes) -> 4 lanes.
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    // Scalar form: broadcast `a`, run the vector FCVTXN, then take lane 0.
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Lane-wise division via the portable simd_div intrinsic (emits FDIV).
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Lane-wise division via the portable simd_div intrinsic (emits FDIV).
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Lane-wise division via the portable simd_div intrinsic (emits FDIV).
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Lane-wise division via the portable simd_div intrinsic (emits FDIV).
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise division via the portable simd_div intrinsic (emits FDIV).
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise division via the portable simd_div intrinsic (emits FDIV).
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision division; plain `/` already emits FDIV.
    a / b
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    // A 1-lane vector only has lane 0, so the dup is the identity (nop).
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    // A 1-lane vector only has lane 0, so the dup is the identity (nop).
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    // Extracts lane N (0..=1) of the 128-bit vector into a 1-lane vector.
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    // Extracts lane N (0..=1) of the 128-bit vector into a 1-lane vector.
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    // Scalar dup is a plain lane extract; 3 bits bound N to the 8 lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    // Scalar dup is a plain lane extract; 3 bits bound N to the 8 lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    // Scalar dup is a plain lane extract; 3 bits bound N to the 8 lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    // Scalar dup is a plain lane extract; 3 bits bound N to the 8 lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    // Scalar dup is a plain lane extract; 3 bits bound N to the 8 lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    // Scalar dup is a plain lane extract; 3 bits bound N to the 8 lanes.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    // Plain lane extract; 4 bits bound N to the 16 lanes of the q-register.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    // Plain lane extract; 4 bits bound N to the 16 lanes of the q-register.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    // Plain lane extract; 4 bits bound N to the 16 lanes of the q-register.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    // Only lane 0 exists in a 1-lane vector; this unwraps it to a scalar.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    // Only lane 0 exists in a 1-lane vector; this unwraps it to a scalar.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    // Only lane 0 exists in a 1-lane vector; this unwraps it to a scalar.
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    // Scalar dup is a plain lane extract; 2 bits bound N to the 4 lanes.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
9746#[doc = "Extract an element from a vector"]
9747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9748#[inline(always)]
9749#[cfg_attr(test, assert_instr(nop, N = 4))]
9750#[rustc_legacy_const_generics(1)]
9751#[target_feature(enable = "neon,fp16")]
9752#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9753#[cfg(not(target_arch = "arm64ec"))]
9754pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
9755    static_assert_uimm_bits!(N, 4);
9756    unsafe { simd_extract!(a, N as u32) }
9757}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    // Broadcasts the single lane into both lanes of a q-register (DUP).
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    // Broadcasts the single lane into both lanes of a q-register (DUP).
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    // Broadcasts lane N (0..=1) into both lanes (DUP).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    // Broadcasts lane N (0..=1) into both lanes (DUP).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    // Scalar dup is a plain lane extract; 1 bit bounds N to the 2 lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    // Scalar dup is a plain lane extract; 1 bit bounds N to the 2 lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    // Scalar dup is a plain lane extract; 1 bit bounds N to the 2 lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    // Scalar dup is a plain lane extract; 1 bit bounds N to the 2 lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    // Scalar dup is a plain lane extract; 1 bit bounds N to the 2 lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // Scalar dup is a plain lane extract; 1 bit bounds N to the 2 lanes.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
9868#[doc = "Set all vector lanes to the same value"]
9869#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
9870#[inline(always)]
9871#[target_feature(enable = "neon")]
9872#[cfg_attr(test, assert_instr(nop, N = 2))]
9873#[rustc_legacy_const_generics(1)]
9874#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9875pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
9876    static_assert_uimm_bits!(N, 2);
9877    unsafe { simd_extract!(a, N as u32) }
9878}
9879#[doc = "Set all vector lanes to the same value"]
9880#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
9881#[inline(always)]
9882#[target_feature(enable = "neon")]
9883#[cfg_attr(test, assert_instr(nop, N = 2))]
9884#[rustc_legacy_const_generics(1)]
9885#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9886pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
9887    static_assert_uimm_bits!(N, 2);
9888    unsafe { simd_extract!(a, N as u32) }
9889}
9890#[doc = "Set all vector lanes to the same value"]
9891#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
9892#[inline(always)]
9893#[target_feature(enable = "neon")]
9894#[cfg_attr(test, assert_instr(nop, N = 2))]
9895#[rustc_legacy_const_generics(1)]
9896#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9897pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
9898    static_assert_uimm_bits!(N, 2);
9899    unsafe { simd_extract!(a, N as u32) }
9900}
9901#[doc = "Set all vector lanes to the same value"]
9902#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
9903#[inline(always)]
9904#[target_feature(enable = "neon")]
9905#[cfg_attr(test, assert_instr(nop, N = 2))]
9906#[rustc_legacy_const_generics(1)]
9907#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9908pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
9909    static_assert_uimm_bits!(N, 2);
9910    unsafe { simd_extract!(a, N as u32) }
9911}
9912#[doc = "Set all vector lanes to the same value"]
9913#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
9914#[inline(always)]
9915#[target_feature(enable = "neon")]
9916#[cfg_attr(test, assert_instr(nop, N = 2))]
9917#[rustc_legacy_const_generics(1)]
9918#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9919pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
9920    static_assert_uimm_bits!(N, 2);
9921    unsafe { simd_extract!(a, N as u32) }
9922}
9923#[doc = "Set all vector lanes to the same value"]
9924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
9925#[inline(always)]
9926#[target_feature(enable = "neon")]
9927#[cfg_attr(test, assert_instr(nop, N = 2))]
9928#[rustc_legacy_const_generics(1)]
9929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9930pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
9931    static_assert_uimm_bits!(N, 2);
9932    unsafe { simd_extract!(a, N as u32) }
9933}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// Computes a ^ b ^ c lane-wise in a single EOR3 instruction (requires the
// `sha3` target feature). Bound to the LLVM AArch64 crypto intrinsic.
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// a ^ b ^ c lane-wise via EOR3 (sha3 feature), 16-bit signed lanes.
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// a ^ b ^ c lane-wise via EOR3 (sha3 feature), 32-bit signed lanes.
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// a ^ b ^ c lane-wise via EOR3 (sha3 feature), 64-bit signed lanes.
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// a ^ b ^ c lane-wise via EOR3 (sha3 feature); unsigned variant uses the
// `eor3u` link name, but XOR is identical regardless of signedness.
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// a ^ b ^ c lane-wise via EOR3 (sha3 feature), 16-bit unsigned lanes.
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// a ^ b ^ c lane-wise via EOR3 (sha3 feature), 32-bit unsigned lanes.
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// a ^ b ^ c lane-wise via EOR3 (sha3 feature), 64-bit unsigned lanes.
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// N (0 or 1) is the starting position within the concatenation [a0, a1, b0, b1]:
// N = 0 returns `a` unchanged, N = 1 returns [a1, b0]. The match is needed
// because simd_shuffle! requires literal const index arrays.
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            // static_assert above restricts N to 0..=1, so this is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vextq_f64 but for 64-bit polynomial lanes: N = 0 yields `a`,
// N = 1 yields [a1, b0] from the concatenation of `a` and `b`.
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            // static_assert above restricts N to 0..=1, so this is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
// Note the operand order: simd_fma(b, c, a) computes b * c + a, so `a` is the
// accumulator despite being the first parameter of this intrinsic.
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// Broadcasts lane LANE (0..=3) of `c` and defers to the plain vector FMA,
// so the result is a + b * c[LANE] lane-wise.
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// As vfma_lane_f16 but `c` is a 128-bit vector, so LANE is 0..=7.
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// 128-bit accumulator/multiplicand with a 64-bit `c`; LANE is 0..=3.
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// All-128-bit variant; LANE selects from the 8 lanes of `c` (0..=7).
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Broadcasts lane LANE (0..=1) of `c` and defers to the vector FMA,
// yielding a + b * c[LANE] lane-wise.
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// As vfma_lane_f32 but `c` is a 128-bit vector, so LANE is 0..=3.
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit accumulator/multiplicand with a 64-bit `c`; LANE is 0..=1.
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// All-128-bit variant; LANE selects from the 4 lanes of `c` (0..=3).
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f64 variant over 2-lane vectors; LANE is 0..=1.
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// `c` has a single lane, so LANE must be 0 (enforced at compile time).
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Single-lane destination with a 2-lane `c`; LANE is 0..=1.
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
// Broadcasts the scalar `c` and defers to the vector FMA: a + b * c lane-wise.
// (The previous doc text said "Multiply-Subtract", but this computes an add.)
pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfma_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
// 128-bit variant: broadcasts `c` and computes a + b * c lane-wise.
pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmaq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
// Broadcasts the scalar `c` and defers to the vector FMA: a + b * c.
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar FMA: extracts the single lane of `c` (LANE must be 0) and computes
// b * c + a; `a` is the accumulator, passed last to fmaf64.
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar half-precision FMA: returns b * c + a (accumulator `a` passed last).
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    fmaf16(b, c, a)
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Extracts lane LANE (0..=3) of `v` and defers to the scalar FMA above.
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// As vfmah_lane_f16 but `v` is a 128-bit vector, so LANE is 0..=7.
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
// Lane-wise a + b * c; simd_fma takes (b, c, a), i.e. the accumulator is last.
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// `c` has a single lane, so LANE must be 0; the lane is broadcast, then FMA.
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
// Broadcasts the scalar `c` and computes a + b * c lane-wise.
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar FMA: extracts lane LANE (0..=1) of `c` and returns b * c[LANE] + a;
// the accumulator `a` is passed last to fmaf32.
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// As vfmas_lane_f32 but `c` is a 128-bit vector, so LANE is 0..=3.
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f64 scalar FMA against a 2-lane vector; LANE is 0..=1.
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
// Widening FMA via FMLAL2: multiplies the upper f16 halves of `a` and `b` and
// accumulates into the f32 lanes of `r`. Requires FEAT_FHM (`fhm`).
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
// 128-bit variant of the widening FMA (FMLAL2): upper f16 halves of `a`/`b`
// are multiplied and accumulated into the four f32 lanes of `r`.
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// By-element form: broadcasts lane LANE (0..=3) of `b`, then performs the
// high-half widening FMA (FMLAL2) against `a`, accumulating into `r`.
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// As above but `b` is a 128-bit vector, so LANE is 0..=7.
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// 128-bit destination with a 64-bit `b`; LANE is 0..=3.
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// All-128-bit variant; LANE selects from the 8 lanes of `b` (0..=7).
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLAL: multiply-add long from the lower halves of `a`).
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLAL: multiply-add long from the lower halves of `a`).
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLAL: multiply-add long from the lower halves of `a`).
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLAL: multiply-add long from the lower halves of `a`).
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Bind directly to the LLVM intrinsic for FMLAL (f16 -> f32 widening
    // multiply-add on the low halves), selected by link_name per-arch.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // Forward unchanged; the enabled target features make this call sound.
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Bind directly to the LLVM intrinsic for FMLAL (128-bit form, low halves).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // Forward unchanged; the enabled target features make this call sound.
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Bind directly to the LLVM intrinsic for FMLSL2 (upper halves).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // Forward unchanged; the enabled target features make this call sound.
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Bind directly to the LLVM intrinsic for FMLSL2 (128-bit form, upper halves).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // Forward unchanged; the enabled target features make this call sound.
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL2: multiply-subtract long from the upper halves of `a`).
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL2: multiply-subtract long from the upper halves of `a`).
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL2: multiply-subtract long from the upper halves of `a`).
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL2: multiply-subtract long from the upper halves of `a`).
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL: multiply-subtract long from the lower halves of `a`).
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL: multiply-subtract long from the lower halves of `a`).
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL: multiply-subtract long from the lower halves of `a`).
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` and delegate to the vector form
    // (FMLSL: multiply-subtract long from the lower halves of `a`).
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Bind directly to the LLVM intrinsic for FMLSL (low halves).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // Forward unchanged; the enabled target features make this call sound.
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Bind directly to the LLVM intrinsic for FMLSL (128-bit form, low halves).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // Forward unchanged; the enabled target features make this call sound.
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe {
        // fms(a, b, c) == fma(a, -b, c): negate `b` and reuse the fused
        // multiply-add, preserving the single-rounding fused semantics.
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` has 2 f32 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` has 4 f32 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` has 2 f32 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` has 4 f32 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // `c` has 2 f64 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `c` and delegate to the vector FMLS form.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` is a single-lane vector, so only lane 0 is valid.
    static_assert!(LANE == 0);
    // Extract the lane and delegate to the scalar-width fused form
    // (lowers to FMSUB per the assertion above).
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // `c` has 2 f64 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the lane and delegate to the single-lane fused form
    // (lowers to FMSUB per the assertion above).
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Splat the scalar `c` across all lanes and reuse the vector FMLS form.
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Splat the scalar `c` across all lanes and reuse the vector FMLS form.
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Wrap the scalar `c` in a one-lane vector and reuse the vector form.
    vfms_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    // fms(a, b, c) == fma(a, -b, c): negate `b` and reuse the scalar
    // fused multiply-add (lowers to FMSUB per the assertion above).
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // `v` has 4 f16 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the chosen lane as a scalar and delegate to the scalar form.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // `v` has 8 f16 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Extract the chosen lane as a scalar and delegate to the scalar form.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe {
        // fms(a, b, c) == fma(a, -b, c): negate `b` lane-wise and reuse the
        // fused multiply-add, preserving the single-rounding fused semantics.
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `c` is a single-lane vector, so only lane 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast the lane across both f64 lanes and delegate to the vector form.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Splat the scalar `c` across both lanes and reuse the vector FMLS form.
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // fms == fma with `b` negated; the lane bound is checked by the callee.
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // fms == fma with `b` negated; the lane bound is checked by the callee.
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // fms == fma with `b` negated; the lane bound is checked by the callee.
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // fms == fma with `b` negated; the lane bound is checked by the callee.
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    // Unaligned read of the full vector; `ptr` must be valid for the
    // vector's size but needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x2(ptr: *const f64) -> float64x1x2_t {
    // One unaligned read covers every vector of the multi-vector result;
    // `ptr` needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x3(ptr: *const f64) -> float64x1x3_t {
    // One unaligned read covers every vector of the multi-vector result;
    // `ptr` needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x4(ptr: *const f64) -> float64x1x4_t {
    // One unaligned read covers every vector of the multi-vector result;
    // `ptr` needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x2(ptr: *const f64) -> float64x2x2_t {
    // One unaligned read covers every vector of the multi-vector result;
    // `ptr` needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x3(ptr: *const f64) -> float64x2x3_t {
    // One unaligned read covers every vector of the multi-vector result;
    // `ptr` needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x4(ptr: *const f64) -> float64x2x4_t {
    // One unaligned read covers every vector of the multi-vector result;
    // `ptr` needs no particular alignment.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    // FFI binding resolved via `link_name` to the LLVM `ld2r`
    // (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    // FFI binding resolved via `link_name` to the LLVM `ld2r`
    // (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    // FFI binding resolved via `link_name` to the LLVM `ld2r`
    // (load-and-replicate) intrinsic. Also reused (via transmute) by the
    // u64/p64 variants, which share this bit layout.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    // With one 64-bit lane per register a plain unaligned read is enough;
    // the codegen check above expects no dedicated instruction (`nop`).
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // FFI binding resolved via `link_name` to the LLVM `ld2lane` intrinsic;
    // `b` supplies the existing register contents.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // FFI binding resolved via `link_name` to the LLVM `ld2lane` intrinsic;
    // also reused (via transmute) by the u64/p64 lane variants.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    // p64 shares the bit layout of i64: delegate to the signed variant and
    // reinterpret the argument and result.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    // u64 shares the bit layout of i64: delegate to the signed variant and
    // reinterpret the argument and result.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian variant: the signed-intrinsic result can be
    // reinterpreted directly, no lane fix-up needed.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Big-endian variant: after the load, swap the two lanes of each
    // register so the result matches the little-endian lane ordering.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian variant: the signed-intrinsic result can be
    // reinterpreted directly, no lane fix-up needed.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Big-endian variant: after the load, swap the two lanes of each
    // register so the result matches the little-endian lane ordering.
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // FFI binding resolved via `link_name` to the LLVM `ld2` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // FFI binding resolved via `link_name` to the LLVM `ld2` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // LANE must address one of the two 64-bit lanes (fit in 1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // FFI binding resolved via `link_name` to the LLVM `ld2lane` intrinsic;
    // `b` supplies the existing register contents.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // LANE must address one of the sixteen 8-bit lanes (fit in 4 bits).
    static_assert_uimm_bits!(LANE, 4);
    // FFI binding resolved via `link_name` to the LLVM `ld2lane` intrinsic;
    // `b` supplies the existing register contents.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // LANE must address one of the two 64-bit lanes (fit in 1 bit).
    static_assert_uimm_bits!(LANE, 1);
    // FFI binding resolved via `link_name` to the LLVM `ld2lane` intrinsic;
    // also reused (via transmute) by the p64 lane variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and i64 vectors share a bit layout: delegate to the signed
    // variant and reinterpret the result.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // poly8 and i8 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian variant: poly64 and i64 vectors share a bit layout, so
    // delegate to the signed load and reinterpret the result.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Big-endian variant: perform the same signed load, then reverse the two
    // lanes of each returned register ([1, 0]) to match big-endian lane order.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // u64 and i64 vectors share a bit layout: delegate to the signed load
    // and reinterpret the result.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    // FFI declaration of the LLVM AArch64 `ld3r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    // FFI declaration of the LLVM AArch64 `ld3r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    // FFI declaration of the LLVM AArch64 `ld3r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // With one f64 per register, "de-interleaving" is a plain unaligned read
    // of three consecutive doubles; no ld3 instruction is needed (hence `nop`).
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // FFI declaration of the LLVM AArch64 `ld3lane` intrinsic this wrapper lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // poly64 and i64 vectors share a bit layout: delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // FFI declaration of the LLVM AArch64 `ld3lane` intrinsic this wrapper lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // u64 and i64 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Little-endian variant: poly64 and i64 vectors share a bit layout, so
    // delegate to the signed replicate-load and reinterpret the result.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian variant: perform the same signed replicate-load, then reverse
    // the two lanes of each of the three registers ([1, 0]).
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Little-endian variant: u64 and i64 vectors share a bit layout, so
    // delegate to the signed replicate-load and reinterpret the result.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Big-endian variant: perform the same signed replicate-load, then reverse
    // the two lanes of each of the three registers ([1, 0]).
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    // FFI declaration of the LLVM AArch64 `ld3` (de-interleaving load) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    // FFI declaration of the LLVM AArch64 `ld3` (de-interleaving load) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // FFI declaration of the LLVM AArch64 `ld3lane` intrinsic this wrapper lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and i64 vectors share a bit layout: delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
12196#[doc = "Load multiple 3-element structures to two registers"]
12197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12198#[doc = "## Safety"]
12199#[doc = "  * Neon intrinsic unsafe"]
12200#[inline(always)]
12201#[target_feature(enable = "neon")]
12202#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12203#[rustc_legacy_const_generics(2)]
12204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12205pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12206    static_assert_uimm_bits!(LANE, 3);
12207    unsafe extern "unadjusted" {
12208        #[cfg_attr(
12209            any(target_arch = "aarch64", target_arch = "arm64ec"),
12210            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12211        )]
12212        fn _vld3q_lane_s8(
12213            a: int8x16_t,
12214            b: int8x16_t,
12215            c: int8x16_t,
12216            n: i64,
12217            ptr: *const i8,
12218        ) -> int8x16x3_t;
12219    }
12220    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12221}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // FFI declaration of the LLVM AArch64 `ld3lane` intrinsic this wrapper lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // poly8 and i8 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Little-endian variant: poly64 and i64 vectors share a bit layout, so
    // delegate to the signed load and reinterpret the result.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian variant: perform the same signed load, then reverse the two
    // lanes of each of the three registers ([1, 0]).
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // u64 and i64 vectors share a bit layout: delegate to the signed load
    // and reinterpret the result.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    // FFI declaration of the LLVM AArch64 `ld4r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    // FFI declaration of the LLVM AArch64 `ld4r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    // FFI declaration of the LLVM AArch64 `ld4r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // With one f64 per register, "de-interleaving" is a plain unaligned read
    // of four consecutive doubles; no ld4 instruction is needed (hence `nop`).
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // FFI declaration of the LLVM AArch64 `ld4lane` intrinsic this wrapper lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // FFI declaration of the LLVM AArch64 `ld4lane` intrinsic this wrapper lowers to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // poly64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // poly64 and i64 vectors share a bit layout: delegate to the signed
    // variant and reinterpret the result.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // uint64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // u64 and i64 vectors share a bit layout: delegate to the signed variant
    // and reinterpret the result.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian variant: poly64 and i64 vectors share a bit layout, so
    // delegate to the signed replicate-load and reinterpret the result.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: perform the same signed load, then swap the two
    // 64-bit lanes of each of the four result vectors ([1, 0] shuffle) so the
    // lane order matches what callers expect on big-endian targets.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian variant: reuse the signed 64-bit load and reinterpret,
    // no lane reordering required.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian variant: perform the same signed load, then swap the two
    // 64-bit lanes of each of the four result vectors ([1, 0] shuffle) so the
    // lane order matches what callers expect on big-endian targets.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Thin wrapper over the LLVM ld4 intrinsic for 2 x f64 vectors; the
    // pointer is cast to the pointee type the intrinsic declaration expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Thin wrapper over the LLVM ld4 intrinsic for 2 x i64 vectors; also the
    // shared implementation behind the u64/p64 wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    // The LLVM intrinsic takes the four source vectors, the lane immediate,
    // and the load address last.
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12592#[doc = "Load multiple 4-element structures to four registers"]
12593#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12594#[doc = "## Safety"]
12595#[doc = "  * Neon intrinsic unsafe"]
12596#[inline(always)]
12597#[target_feature(enable = "neon")]
12598#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12599#[rustc_legacy_const_generics(2)]
12600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12601pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12602    static_assert_uimm_bits!(LANE, 3);
12603    unsafe extern "unadjusted" {
12604        #[cfg_attr(
12605            any(target_arch = "aarch64", target_arch = "arm64ec"),
12606            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12607        )]
12608        fn _vld4q_lane_s8(
12609            a: int8x16_t,
12610            b: int8x16_t,
12611            c: int8x16_t,
12612            d: int8x16_t,
12613            n: i64,
12614            ptr: *const i8,
12615        ) -> int8x16x4_t;
12616    }
12617    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12618}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    // The LLVM intrinsic takes the four source vectors, the lane immediate,
    // and the load address last.
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Bit-layout-identical to the signed 64-bit variant: delegate and reinterpret.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Bit-layout-identical to the signed 8-bit variant: delegate and reinterpret.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Bit-layout-identical to the signed 64-bit variant: delegate and reinterpret.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Bit-layout-identical to the signed 8-bit variant: delegate and reinterpret.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian variant: reuse the signed 64-bit load and reinterpret,
    // no lane reordering required.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: perform the same signed load, then swap the two
    // 64-bit lanes of each of the four result vectors ([1, 0] shuffle) so the
    // lane order matches what callers expect on big-endian targets.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Reuse the signed 64-bit load and reinterpret the result.
    // NOTE(review): unlike vld4q_p64 above, this intrinsic has no
    // #[cfg(target_endian)] pair with a big-endian shuffle variant — confirm
    // against the generator spec whether that is intentional.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
    // Single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // Load the value with Acquire ordering (the "load-acquire" semantics of
    // LDAP1), then insert it into lane LANE of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Load the value with Acquire ordering (the "load-acquire" semantics of
    // LDAP1), then insert it into lane LANE of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Same 64-bit-per-lane layout as the signed variant: delegate the atomic
    // acquire load there and reinterpret the result back to f64 lanes.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
    // Single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // Delegate the atomic acquire load to the signed variant and reinterpret.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Delegate the atomic acquire load to the signed variant and reinterpret.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
    // Single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // Delegate the atomic acquire load to the signed variant and reinterpret.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Delegate the atomic acquire load to the signed variant and reinterpret.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_f16<const INDEX: i32>(a: float16x4_t, b: uint8x8_t) -> float16x8_t {
    // Valid INDEX range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // f16 lanes are bit-compatible with i16 lanes here: reuse the signed variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_f16<const INDEX: i32>(a: float16x8_t, b: uint8x8_t) -> float16x8_t {
    // Valid INDEX range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // f16 lanes are bit-compatible with i16 lanes here: reuse the signed variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // Valid INDEX range for the 8-bit, 64-bit-index-vector form is 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Unsigned lanes are bit-compatible with signed: reuse the s8 variant.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Valid INDEX range for the 8-bit, 64-bit-index-vector form is 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Unsigned lanes are bit-compatible with signed: reuse the s8 variant.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // Valid INDEX range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Unsigned lanes are bit-compatible with signed: reuse the s16 variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // Valid INDEX range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Unsigned lanes are bit-compatible with signed: reuse the s16 variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // Valid INDEX range for the 8-bit, 64-bit-index-vector form is 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Poly lanes are bit-compatible with signed: reuse the s8 variant.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Valid INDEX range for the 8-bit, 64-bit-index-vector form is 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Poly lanes are bit-compatible with signed: reuse the s8 variant.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // Valid INDEX range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Poly lanes are bit-compatible with signed: reuse the s16 variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // Valid INDEX range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Poly lanes are bit-compatible with signed: reuse the s16 variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // Valid LANE range for the 8-bit, 64-bit-index-vector form is 0..=1.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM vluti2.lane intrinsic; LANE is forwarded as
    // the immediate operand. Shared implementation for the u8/p8 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Valid LANE range for the 8-bit, 64-bit-index-vector form is 0..=1.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM vluti2.lane intrinsic; LANE is forwarded as
    // the immediate operand. Shared implementation for the u8/p8 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    // Valid LANE range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Direct binding to the LLVM vluti2.lane intrinsic; LANE is forwarded as
    // the immediate operand. Shared implementation for the u16/p16/f16 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    // Valid LANE range for the 16-bit, 64-bit-index-vector form is 0..=3.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Direct binding to the LLVM vluti2.lane intrinsic; LANE is forwarded as
    // the immediate operand. Shared implementation for the u16/p16/f16 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_f16<const INDEX: i32>(a: float16x4_t, b: uint8x16_t) -> float16x8_t {
    // Valid INDEX range for the 16-bit, 128-bit-index-vector (laneq) form is 0..=7.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // f16 lanes are bit-compatible with i16 lanes here: reuse the signed variant.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_f16<const INDEX: i32>(a: float16x8_t, b: uint8x16_t) -> float16x8_t {
    // Valid INDEX range for the 16-bit, 128-bit-index-vector (laneq) form is 0..=7.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // f16 lanes are bit-compatible with i16 lanes here: reuse the signed variant.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x16_t {
    // Valid INDEX range for the 8-bit, 128-bit-index-vector (laneq) form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Unsigned lanes are bit-compatible with signed: reuse the s8 variant.
    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Valid INDEX range for the 8-bit, 128-bit-index-vector (laneq) form is 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Unsigned lanes are bit-compatible with signed: reuse the s8 variant.
    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
}
13108#[doc = "Lookup table read with 2-bit indices"]
13109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u16)"]
13110#[doc = "## Safety"]
13111#[doc = "  * Neon intrinsic unsafe"]
13112#[inline(always)]
13113#[target_feature(enable = "neon,lut")]
13114#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13115#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13116#[rustc_legacy_const_generics(2)]
13117pub unsafe fn vluti2_laneq_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x16_t) -> uint16x8_t {
13118    static_assert!(INDEX >= 0 && INDEX <= 7);
13119    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13120}
13121#[doc = "Lookup table read with 2-bit indices"]
13122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u16)"]
13123#[doc = "## Safety"]
13124#[doc = "  * Neon intrinsic unsafe"]
13125#[inline(always)]
13126#[target_feature(enable = "neon,lut")]
13127#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13128#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13129#[rustc_legacy_const_generics(2)]
13130pub unsafe fn vluti2q_laneq_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
13131    static_assert!(INDEX >= 0 && INDEX <= 7);
13132    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13133}
13134#[doc = "Lookup table read with 2-bit indices"]
13135#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p8)"]
13136#[doc = "## Safety"]
13137#[doc = "  * Neon intrinsic unsafe"]
13138#[inline(always)]
13139#[target_feature(enable = "neon,lut")]
13140#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13141#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13142#[rustc_legacy_const_generics(2)]
13143pub unsafe fn vluti2_laneq_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x16_t) -> poly8x16_t {
13144    static_assert!(INDEX >= 0 && INDEX <= 3);
13145    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
13146}
13147#[doc = "Lookup table read with 2-bit indices"]
13148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p8)"]
13149#[doc = "## Safety"]
13150#[doc = "  * Neon intrinsic unsafe"]
13151#[inline(always)]
13152#[target_feature(enable = "neon,lut")]
13153#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13154#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13155#[rustc_legacy_const_generics(2)]
13156pub unsafe fn vluti2q_laneq_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13157    static_assert!(INDEX >= 0 && INDEX <= 3);
13158    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
13159}
13160#[doc = "Lookup table read with 2-bit indices"]
13161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p16)"]
13162#[doc = "## Safety"]
13163#[doc = "  * Neon intrinsic unsafe"]
13164#[inline(always)]
13165#[target_feature(enable = "neon,lut")]
13166#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13167#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13168#[rustc_legacy_const_generics(2)]
13169pub unsafe fn vluti2_laneq_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x16_t) -> poly16x8_t {
13170    static_assert!(INDEX >= 0 && INDEX <= 7);
13171    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13172}
13173#[doc = "Lookup table read with 2-bit indices"]
13174#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p16)"]
13175#[doc = "## Safety"]
13176#[doc = "  * Neon intrinsic unsafe"]
13177#[inline(always)]
13178#[target_feature(enable = "neon,lut")]
13179#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13180#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13181#[rustc_legacy_const_generics(2)]
13182pub unsafe fn vluti2q_laneq_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x16_t) -> poly16x8_t {
13183    static_assert!(INDEX >= 0 && INDEX <= 7);
13184    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13185}
// The signed `vluti2*_laneq` intrinsics bind directly to the LLVM
// `llvm.aarch64.neon.vluti2.laneq.*` builtins via `link_name` on an
// `extern "unadjusted"` declaration; the const generic INDEX is forwarded as
// the trailing `i32` argument, which LLVM requires to be an immediate.
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s8<const INDEX: i32>(a: int8x8_t, b: uint8x16_t) -> int8x16_t {
    static_assert!(INDEX >= 0 && INDEX <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v8i8"
        )]
        fn _vluti2_laneq_s8(a: int8x8_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti2_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s8<const INDEX: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    static_assert!(INDEX >= 0 && INDEX <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v16i8"
        )]
        fn _vluti2q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti2q_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s16<const INDEX: i32>(a: int16x4_t, b: uint8x16_t) -> int16x8_t {
    static_assert!(INDEX >= 0 && INDEX <= 7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v4i16"
        )]
        fn _vluti2_laneq_s16(a: int16x4_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti2_laneq_s16(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s16<const INDEX: i32>(a: int16x8_t, b: uint8x16_t) -> int16x8_t {
    static_assert!(INDEX >= 0 && INDEX <= 7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v8i16"
        )]
        fn _vluti2q_laneq_s16(a: int16x8_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti2q_laneq_s16(a, b, INDEX)
}
// The `vluti4q_lane_*_x2` f16/u16/p16 variants are transmute wrappers over the
// signed `vluti4q_lane_s16_x2` intrinsic: the two-register table pair is
// reinterpreted as `int16x8x2_t`, the lookup runs, and the result is
// transmuted back. LANE (0..=1) selects which 64-bit half of `b` supplies the
// packed 4-bit indices.
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
13305#[doc = "Lookup table read with 4-bit indices"]
13306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13307#[doc = "## Safety"]
13308#[doc = "  * Neon intrinsic unsafe"]
13309#[inline(always)]
13310#[target_feature(enable = "neon,lut")]
13311#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13312#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13313#[rustc_legacy_const_generics(2)]
13314pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13315    static_assert!(LANE >= 0 && LANE <= 1);
13316    unsafe extern "unadjusted" {
13317        #[cfg_attr(
13318            any(target_arch = "aarch64", target_arch = "arm64ec"),
13319            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13320        )]
13321        fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13322    }
13323    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13324}
// `vluti4q_lane_s8` binds directly to the LLVM builtin; the 8-bit u8/p8
// variants below transmute through it. With a 64-bit index vector (`b:
// uint8x8_t`) there is only one valid segment, hence `static_assert!(LANE == 0)`.
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti4q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
// `laneq` variants take a full 128-bit index vector (`b: uint8x16_t`), so LANE
// ranges over 0..=3 64-bit... segments of packed 4-bit indices. These f16/u16/p16
// forms transmute through the signed `vluti4q_laneq_s16_x2` intrinsic.
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
// Direct LLVM bindings for the signed `laneq` forms, plus u8/p8 transmute
// wrappers. The `x2` form passes the table register pair as two separate
// arguments (`a.0`, `a.1`); LANE is forwarded as the trailing immediate `i32`.
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti4q_laneq_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
// Element-wise floating-point maximum (FMAX) via the `llvm.aarch64.neon.fmax.*`
// builtins. Safe public wrappers around the unsafe extern declarations.
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmax_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmaxq_f64(a, b) }
}
// Scalar f16 maximum; excluded on arm64ec, where fp16 support is unavailable.
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmaxh_f16(a, b) }
}
// FMAXNM ("maximum number"): implemented with the generic `simd_fmax` /
// `f16::max`, whose semantics (prefer the numeric operand when one input is
// NaN) match the instruction, rather than an LLVM link_name binding.
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    f16::max(a, b)
}
// Across-vector "maximum number" reductions via the generic `simd_reduce_max`.
// Note the assert_instr values: two-element reductions (`vmaxnmv_f32`,
// `vmaxnmvq_f64`) lower to the pairwise FMAXNMP instruction, while wider
// vectors lower to FMAXNMV.
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    unsafe { simd_reduce_max(a) }
}
// Across-vector floating-point maximum via the `llvm.aarch64.neon.fmaxv.*`
// builtins. Two-element reductions (`vmaxv_f32`, `vmaxvq_f64`) lower to the
// pairwise FMAXP instruction, wider vectors to FMAXV (see assert_instr).
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxvq_f64(a) }
}
13685#[doc = "Horizontal vector max."]
13686#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
13687#[inline(always)]
13688#[target_feature(enable = "neon")]
13689#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13690#[cfg_attr(test, assert_instr(smaxv))]
13691pub fn vmaxv_s8(a: int8x8_t) -> i8 {
13692    unsafe { simd_reduce_max(a) }
13693}
13694#[doc = "Horizontal vector max."]
13695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
13696#[inline(always)]
13697#[target_feature(enable = "neon")]
13698#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13699#[cfg_attr(test, assert_instr(smaxv))]
13700pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
13701    unsafe { simd_reduce_max(a) }
13702}
13703#[doc = "Horizontal vector max."]
13704#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
13705#[inline(always)]
13706#[target_feature(enable = "neon")]
13707#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13708#[cfg_attr(test, assert_instr(smaxv))]
13709pub fn vmaxv_s16(a: int16x4_t) -> i16 {
13710    unsafe { simd_reduce_max(a) }
13711}
13712#[doc = "Horizontal vector max."]
13713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
13714#[inline(always)]
13715#[target_feature(enable = "neon")]
13716#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13717#[cfg_attr(test, assert_instr(smaxv))]
13718pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
13719    unsafe { simd_reduce_max(a) }
13720}
13721#[doc = "Horizontal vector max."]
13722#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
13723#[inline(always)]
13724#[target_feature(enable = "neon")]
13725#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13726#[cfg_attr(test, assert_instr(smaxp))]
13727pub fn vmaxv_s32(a: int32x2_t) -> i32 {
13728    unsafe { simd_reduce_max(a) }
13729}
13730#[doc = "Horizontal vector max."]
13731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
13732#[inline(always)]
13733#[target_feature(enable = "neon")]
13734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13735#[cfg_attr(test, assert_instr(smaxv))]
13736pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
13737    unsafe { simd_reduce_max(a) }
13738}
13739#[doc = "Horizontal vector max."]
13740#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
13741#[inline(always)]
13742#[target_feature(enable = "neon")]
13743#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13744#[cfg_attr(test, assert_instr(umaxv))]
13745pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
13746    unsafe { simd_reduce_max(a) }
13747}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane reduction lowers to the pairwise UMAXP rather than UMAXV.
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Bind the AArch64 `fmin` LLVM intrinsic for 1-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly and
    // operates only on plain SIMD values.
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Bind the AArch64 `fmin` LLVM intrinsic for 2-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly and
    // operates only on plain SIMD values.
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    // Bind the scalar f16 `fmin` LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly and
    // operates only on plain scalar values.
    unsafe { _vminh_f16(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // SAFETY: element-wise float-min intrinsic on plain SIMD values.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: element-wise float-min intrinsic on plain SIMD values.
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    // `f16::min` returns the non-NaN operand when exactly one input is NaN,
    // which matches the FMINNM "minimum number" behavior.
    f16::min(a, b)
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    // SAFETY: pure lane-wise reduction of a SIMD value.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    // SAFETY: pure lane-wise reduction of a SIMD value.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// A 2-lane reduction lowers to the pairwise FMINNMP rather than FMINNMV.
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    // SAFETY: pure lane-wise reduction of a SIMD value.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// A 2-lane reduction lowers to the pairwise FMINNMP rather than FMINNMV.
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    // SAFETY: pure lane-wise reduction of a SIMD value.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    // SAFETY: pure lane-wise reduction of a SIMD value.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    // Bind the AArch64 `fminv` reduction intrinsic for 4-lane f16 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly.
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    // Bind the AArch64 `fminv` reduction intrinsic for 8-lane f16 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly.
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane reduction lowers to the pairwise FMINP rather than FMINV.
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    // Bind the AArch64 `fminv` reduction intrinsic for 2-lane f32 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly.
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    // Bind the AArch64 `fminv` reduction intrinsic for 4-lane f32 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly.
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane reduction lowers to the pairwise FMINP rather than FMINV.
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    // Bind the AArch64 `fminv` reduction intrinsic for 2-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the call matches the declared intrinsic signature exactly.
    unsafe { _vminvq_f64(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane reduction lowers to the pairwise SMINP rather than SMINV.
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane reduction lowers to the pairwise UMINP rather than UMINV.
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    // SAFETY: pure lane-wise reduction of a SIMD value; needs only the declared `neon` feature.
    unsafe { simd_reduce_min(a) }
}
14107#[doc = "Floating-point multiply-add to accumulator"]
14108#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
14109#[inline(always)]
14110#[target_feature(enable = "neon")]
14111#[cfg_attr(test, assert_instr(fmul))]
14112#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14113pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
14114    unsafe { simd_add(a, simd_mul(b, c)) }
14115}
14116#[doc = "Floating-point multiply-add to accumulator"]
14117#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
14118#[inline(always)]
14119#[target_feature(enable = "neon")]
14120#[cfg_attr(test, assert_instr(fmul))]
14121#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14122pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
14123    unsafe { simd_add(a, simd_mul(b, c)) }
14124}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 8 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // Compile-time check: LANE must fit in 3 bits (0..=7), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` to all 8 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // Compile-time check: LANE must fit in 1 bit (0..=1), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` to all 4 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 4 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 8 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // Compile-time check: LANE must fit in 3 bits (0..=7), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` to all 8 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // Compile-time check: LANE must fit in 1 bit (0..=1), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` to all 4 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 4 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
14337#[doc = "Multiply-add long"]
14338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
14339#[inline(always)]
14340#[target_feature(enable = "neon")]
14341#[cfg_attr(test, assert_instr(smlal2))]
14342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14343pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
14344    vmlal_high_s16(a, b, vdupq_n_s16(c))
14345}
14346#[doc = "Multiply-add long"]
14347#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
14348#[inline(always)]
14349#[target_feature(enable = "neon")]
14350#[cfg_attr(test, assert_instr(smlal2))]
14351#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14352pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
14353    vmlal_high_s32(a, b, vdupq_n_s32(c))
14354}
14355#[doc = "Multiply-add long"]
14356#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
14357#[inline(always)]
14358#[target_feature(enable = "neon")]
14359#[cfg_attr(test, assert_instr(umlal2))]
14360#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14361pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
14362    vmlal_high_u16(a, b, vdupq_n_u16(c))
14363}
14364#[doc = "Multiply-add long"]
14365#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
14366#[inline(always)]
14367#[target_feature(enable = "neon")]
14368#[cfg_attr(test, assert_instr(umlal2))]
14369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14370pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
14371    vmlal_high_u32(a, b, vdupq_n_u32(c))
14372}
14373#[doc = "Signed multiply-add long"]
14374#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
14375#[inline(always)]
14376#[target_feature(enable = "neon")]
14377#[cfg_attr(test, assert_instr(smlal2))]
14378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14379pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
14380    unsafe {
14381        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14382        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14383        vmlal_s8(a, b, c)
14384    }
14385}
14386#[doc = "Signed multiply-add long"]
14387#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
14388#[inline(always)]
14389#[target_feature(enable = "neon")]
14390#[cfg_attr(test, assert_instr(smlal2))]
14391#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14392pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
14393    unsafe {
14394        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14395        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14396        vmlal_s16(a, b, c)
14397    }
14398}
14399#[doc = "Signed multiply-add long"]
14400#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
14401#[inline(always)]
14402#[target_feature(enable = "neon")]
14403#[cfg_attr(test, assert_instr(smlal2))]
14404#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14405pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
14406    unsafe {
14407        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
14408        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
14409        vmlal_s32(a, b, c)
14410    }
14411}
14412#[doc = "Unsigned multiply-add long"]
14413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
14414#[inline(always)]
14415#[target_feature(enable = "neon")]
14416#[cfg_attr(test, assert_instr(umlal2))]
14417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14418pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
14419    unsafe {
14420        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14421        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14422        vmlal_u8(a, b, c)
14423    }
14424}
14425#[doc = "Unsigned multiply-add long"]
14426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
14427#[inline(always)]
14428#[target_feature(enable = "neon")]
14429#[cfg_attr(test, assert_instr(umlal2))]
14430#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14431pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
14432    unsafe {
14433        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14434        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14435        vmlal_u16(a, b, c)
14436    }
14437}
14438#[doc = "Unsigned multiply-add long"]
14439#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
14440#[inline(always)]
14441#[target_feature(enable = "neon")]
14442#[cfg_attr(test, assert_instr(umlal2))]
14443#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14444pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
14445    unsafe {
14446        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14447        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
14448        vmlal_u32(a, b, c)
14449    }
14450}
14451#[doc = "Floating-point multiply-subtract from accumulator"]
14452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
14453#[inline(always)]
14454#[target_feature(enable = "neon")]
14455#[cfg_attr(test, assert_instr(fmul))]
14456#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14457pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
14458    unsafe { simd_sub(a, simd_mul(b, c)) }
14459}
14460#[doc = "Floating-point multiply-subtract from accumulator"]
14461#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
14462#[inline(always)]
14463#[target_feature(enable = "neon")]
14464#[cfg_attr(test, assert_instr(fmul))]
14465#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14466pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
14467    unsafe { simd_sub(a, simd_mul(b, c)) }
14468}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3), the lane count of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 8 lanes, then delegate to the vector form.
    unsafe {
        // SAFETY: the assertion above keeps every shuffle index in bounds.
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` across all 8 lanes, then delegate to the
        // high-half widening multiply-subtract.
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then delegate to the
        // high-half widening multiply-subtract.
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then delegate to the
        // high-half widening multiply-subtract.
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all 8 lanes, then delegate to the
        // high-half widening multiply-subtract.
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `c` across all 8 lanes, then delegate to the
        // high-half widening multiply-subtract.
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then delegate to the
        // high-half widening multiply-subtract.
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `c` across all 4 lanes, then delegate to the
        // high-half widening multiply-subtract.
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Splat scalar `c` into a vector and reuse the vector form.
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Splat scalar `c` into a vector and reuse the vector form.
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Splat scalar `c` into a vector and reuse the vector form.
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Splat scalar `c` into a vector and reuse the vector form.
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the high halves (lanes 8..16) of `b` and `c`, then reuse the
        // low-half widening multiply-subtract; together this is SMLSL2.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the high halves (lanes 4..8) of `b` and `c`, then reuse the
        // low-half widening multiply-subtract; together this is SMLSL2.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_s16(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the high halves (lanes 2..4) of `b` and `c`, then reuse the
        // low-half widening multiply-subtract; together this is SMLSL2.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the high halves (lanes 8..16) of `b` and `c`, then reuse the
        // low-half widening multiply-subtract; together this is UMLSL2.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the high halves (lanes 4..8) of `b` and `c`, then reuse the
        // low-half widening multiply-subtract; together this is UMLSL2.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the high halves (lanes 2..4) of `b` and `c`, then reuse the
        // low-half widening multiply-subtract; together this is UMLSL2.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_u32(a, b, c)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the high half (lanes 8..16), then sign-extend each lane.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the high half (lanes 4..8), then sign-extend each lane.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_s16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the high half (lanes 2..4), then sign-extend each lane.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_s32(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the high half (lanes 8..16), then zero-extend each lane.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_u8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the high half (lanes 4..8), then zero-extend each lane.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_u16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the high half (lanes 2..4), then zero-extend each lane.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_u32(a)
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // Truncate each lane of `b`, then place `a` in the low half and the
        // narrowed `b` in the high half of the result.
        let c: int8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe {
        // Truncate each lane of `b`, then place `a` in the low half and the
        // narrowed `b` in the high half of the result.
        let c: int16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe {
        // Truncate each lane of `b`, then place `a` in the low half and the
        // narrowed `b` in the high half of the result.
        let c: int32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        // Truncate each lane of `b`, then place `a` in the low half and the
        // narrowed `b` in the high half of the result.
        let c: uint8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe {
        // Truncate each lane of `b`, then place `a` in the low half and the
        // narrowed `b` in the high half of the result.
        let c: uint16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe {
        // Truncate each lane of `b`, then place `a` in the low half and the
        // narrowed `b` in the high half of the result.
        let c: uint32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point multiply.
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point multiply.
    unsafe { simd_mul(a, b) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // A one-lane vector only has lane 0.
    static_assert!(LANE == 0);
    // Extract the selected lane as a scalar and rebuild a one-lane vector
    // from it (transmute is valid: f64 and float64x1_t have the same layout).
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` to a 4-lane vector and multiply lane-wise.
        simd_mul(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes and multiply lane-wise.
        simd_mul(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane as a scalar and rebuild a one-lane vector
    // from it (transmute is valid: f64 and float64x1_t have the same layout).
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    // Splat scalar `b` into a vector and multiply lane-wise.
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    // Splat scalar `b` into a vector and multiply lane-wise.
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // A one-lane vector only has lane 0.
    static_assert!(LANE == 0);
    unsafe {
        // Extract the selected lane and perform a scalar multiply.
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
15058#[doc = "Add"]
15059#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
15060#[inline(always)]
15061#[target_feature(enable = "neon,fp16")]
15062#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15063#[cfg(not(target_arch = "arm64ec"))]
15064#[cfg_attr(test, assert_instr(fmul))]
15065pub fn vmulh_f16(a: f16, b: f16) -> f16 {
15066    a * b
15067}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the selected lane and perform a scalar multiply.
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Extract the selected lane and perform a scalar multiply.
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes, then delegate to the
        // high-half widening multiply.
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes, then delegate to the
        // high-half widening multiply.
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast lane LANE of `b` across all 4 lanes, then delegate to the
        // high-half widening multiply.
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` across all 4 lanes, then delegate to the
        // high-half widening multiply.
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes, then delegate to the
        // high-half widening multiply.
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast lane LANE of `b` across all 8 lanes, then delegate to the
        // high-half widening multiply.
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
15246#[doc = "Multiply long"]
15247#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
15248#[inline(always)]
15249#[target_feature(enable = "neon")]
15250#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15251#[rustc_legacy_const_generics(2)]
15252#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15253pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
15254    static_assert_uimm_bits!(LANE, 1);
15255    unsafe {
15256        vmull_high_u32(
15257            a,
15258            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15259        )
15260    }
15261}
15262#[doc = "Multiply long"]
15263#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
15264#[inline(always)]
15265#[target_feature(enable = "neon")]
15266#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15267#[rustc_legacy_const_generics(2)]
15268#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15269pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
15270    static_assert_uimm_bits!(LANE, 2);
15271    unsafe {
15272        vmull_high_u32(
15273            a,
15274            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15275        )
15276    }
15277}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Duplicate the scalar `b` into every lane, then widen-multiply it against
    // the high half of `a` (handled inside vmull_high_s16).
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Broadcast scalar, then multiply with the high half of `a`.
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    // Unsigned counterpart of vmull_high_n_s16: broadcast then widen-multiply.
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    // Unsigned counterpart of vmull_high_n_s32: broadcast then widen-multiply.
    vmull_high_u32(a, vdupq_n_u32(b))
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    // SAFETY: `simd_extract!` index 1 is in bounds for the 2-lane inputs;
    // the high (index 1) lane of each operand feeds the scalar vmull_p64.
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    // SAFETY: shuffle indices 8..=15 are in bounds for 16-lane vectors.
    unsafe {
        // Select the high 8 lanes of each operand, then do the 8-lane
        // polynomial widening multiply.
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    // SAFETY: shuffle indices 8..=15 are in bounds for 16-lane vectors.
    unsafe {
        // Take the high halves of both inputs and delegate to the narrow
        // (8-lane) signed widening multiply.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // SAFETY: shuffle indices 4..=7 are in bounds for 8-lane vectors.
    unsafe {
        // High halves (lanes 4..=7) feed the 4-lane widening multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // SAFETY: shuffle indices 2..=3 are in bounds for 4-lane vectors.
    unsafe {
        // High halves (lanes 2..=3) feed the 2-lane widening multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    // SAFETY: shuffle indices 8..=15 are in bounds for 16-lane vectors.
    unsafe {
        // Unsigned twin of vmull_high_s8: high halves into the 8-lane multiply.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // SAFETY: shuffle indices 4..=7 are in bounds for 8-lane vectors.
    unsafe {
        // High halves (lanes 4..=7) feed the 4-lane widening multiply.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // SAFETY: shuffle indices 2..=3 are in bounds for 4-lane vectors.
    unsafe {
        // High halves (lanes 2..=3) feed the 2-lane widening multiply.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    // Direct binding to the LLVM PMULL intrinsic; it returns its 128-bit
    // result as an int8x16_t, which is reinterpreted as p128 below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    // SAFETY: the intrinsic is pure arithmetic, guarded by the "neon,aes"
    // target features on this function; the transmute is between two
    // 128-bit types (int8x16_t -> p128).
    unsafe { transmute(_vmull_p64(a, b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has a single lane, so the only legal LANE is 0.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time-pinned to 0, in bounds for the 1-lane input;
    // the shuffle broadcasts it into both lanes before the lanewise multiply.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // `b` has 2 lanes, so LANE is bounded to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 2).
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // Scalar-by-lane multiply: LANE bounded to 1 bit for the 2-lane input.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: extract index is the compile-time-validated LANE (< 2).
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // `laneq` variant: 4-lane input, LANE bounded to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: extract index is the compile-time-validated LANE (< 4).
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // Scalar-by-lane multiply on f64: LANE bounded to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: extract index is the compile-time-validated LANE (< 2).
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Thin binding to LLVM's FMULX intrinsic for 4 x f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: pure arithmetic intrinsic; the required "neon,fp16" target
    // features are enabled on this function.
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // 8 x f16 (quad-register) variant of vmulx_f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by the enabled target features.
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Thin binding to LLVM's FMULX intrinsic for 2 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by the "neon" target feature.
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // 4 x f32 (quad-register) variant of vmulx_f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by the "neon" target feature.
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // 1 x f64 (single-lane) variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by the "neon" target feature.
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 2 x f64 (quad-register) variant of vmulx_f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by the "neon" target feature.
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // LANE bounded to 2 bits (0..=3) for the 4-lane `b`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 4).
    unsafe {
        vmulx_f16(
            a,
            // Broadcast the selected lane of `b` across all 4 lanes.
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `laneq` variant: 8-lane `b`, LANE bounded to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 8).
    unsafe {
        vmulx_f16(
            a,
            // Broadcast the selected lane of `b` into a 4-lane vector.
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    // LANE bounded to 2 bits (0..=3) for the 4-lane `b`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 4).
    unsafe {
        vmulxq_f16(
            a,
            // Broadcast the selected lane of `b` across all 8 output lanes.
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // `laneq` variant: 8-lane `b`, LANE bounded to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 8).
    unsafe {
        vmulxq_f16(
            a,
            // Broadcast the selected lane of `b` across all 8 lanes.
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // LANE bounded to 1 bit (0..=1) for the 2-lane `b`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 2).
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // `laneq` variant: 4-lane `b`, LANE bounded to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 4).
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // LANE bounded to 1 bit (0..=1) for the 2-lane `b`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 2).
    unsafe {
        vmulxq_f32(
            a,
            // Broadcast the selected lane of `b` across all 4 output lanes.
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // `laneq` variant: 4-lane `b`, LANE bounded to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 4).
    unsafe {
        vmulxq_f32(
            a,
            // Broadcast the selected lane of `b` across all 4 lanes.
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE bounded to 1 bit (0..=1) for the 2-lane `b`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: shuffle indices are the compile-time-validated LANE (< 2).
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` has a single lane, so the only legal LANE is 0.
    static_assert!(LANE == 0);
    // SAFETY: extract index 0 is in bounds; the transmute rebuilds a
    // single-lane float64x1_t from the extracted f64 (same 64-bit layout).
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // `laneq` variant: 2-lane `b`, LANE bounded to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: extract index is the compile-time-validated LANE (< 2); the
    // transmute rebuilds a single-lane float64x1_t from the extracted f64.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    // Duplicate the scalar into every lane and reuse the vector FMULX wrapper.
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    // Quad-register variant: broadcast the scalar to all 8 lanes, then FMULX.
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    // Scalar (d-register) FMULX: direct binding to the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by the "neon" target feature.
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    // Scalar (s-register) FMULX: direct binding to the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by the "neon" target feature.
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // `b` has a single lane, so the only legal LANE is 0.
    static_assert!(LANE == 0);
    // SAFETY: extract index is the compile-time-pinned LANE (== 0).
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // `laneq` variant: 2-lane `b`, LANE bounded to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: extract index is the compile-time-validated LANE (< 2).
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE bounded to 1 bit (0..=1) for the 2-lane `b`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: extract index is the compile-time-validated LANE (< 2).
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // `laneq` variant: 4-lane `b`, LANE bounded to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: extract index is the compile-time-validated LANE (< 4).
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    // Scalar (h-register) FMULX: direct binding to the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: pure arithmetic intrinsic, guarded by "neon,fp16" target features.
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // LANE bounded to 2 bits (0..=3) for the 4-lane `b`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: extract index is the compile-time-validated LANE (< 4).
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // `laneq` variant: 8-lane `b`, LANE bounded to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: extract index is the compile-time-validated LANE (< 8).
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has a single lane, so the only legal LANE is 0.
    static_assert!(LANE == 0);
    // SAFETY: shuffle index is the compile-time-pinned LANE (== 0); the lone
    // lane of `b` is broadcast into both lanes before the vector FMULX.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise negation of a SIMD register value; no memory access.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise negation of a SIMD register value; no memory access.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    // SAFETY: lane-wise (wrapping) integer negation of a SIMD register value.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    // SAFETY: lane-wise (wrapping) integer negation of a SIMD register value.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegd_s64(a: i64) -> i64 {
    // Wrapping negation matches the NEG instruction: i64::MIN maps to itself
    // instead of overflowing (which would panic in debug builds with `-`).
    a.wrapping_neg()
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    // Plain scalar negation; codegen is pinned to FNEG by `assert_instr` above.
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    // SAFETY: lane indices 0 and 1 are in-bounds for a two-lane vector; the
    // result is simply the sum of the two lanes.
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    // SAFETY: lane indices 0 and 1 are in-bounds for a two-lane vector; the
    // result is simply the sum of the two lanes.
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    // SAFETY: ordered lane reduction (0 + a[0] + a[1]) over a register value.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    // SAFETY: ordered lane reduction (0 + a[0] + a[1]) over a register value.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // SAFETY: the shuffles select the even- and odd-indexed lanes of the
    // concatenation a:b; adding the two selections yields the pairwise sums,
    // which is what FADDP computes.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (FADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (FADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: even/odd lane selections of the concatenation a:b are added,
    // producing the pairwise sums (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): the generated summary said "add pairwise", but this wraps
    // FMAXP (fmaxp intrinsic); corrected here — the real fix belongs in the
    // stdarch-gen-arm spec this file is generated from.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
        )]
        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; the signature takes and
    // returns register values only (no pointers).
    unsafe { _vpmax_f16(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): summary said "add pairwise" but this wraps FMAXP;
    // corrected — the generator spec should be fixed upstream.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
        )]
        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): summary said "add pairwise" but this wraps FMAXNMP;
    // corrected — the generator spec should be fixed upstream.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
        )]
        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxnm_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): summary said "add pairwise" but this wraps FMAXNMP;
    // corrected — the generator spec should be fixed upstream.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
        )]
        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxnmq_f16(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        // `fmaxnmv` across a two-lane vector reduces the pair to a scalar,
        // equivalent to pairwise max-number on the two lanes.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        // `fmaxnmv` across a two-lane vector reduces the pair to a scalar.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxnms_f32(a) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        // `fmaxv` across a two-lane vector reduces the pair to a scalar.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        // `fmaxv` across a two-lane vector reduces the pair to a scalar.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmaxs_f32(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): summary said "add pairwise" but this wraps FMINP;
    // corrected — the generator spec should be fixed upstream.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f16"
        )]
        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpmin_f16(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): summary said "add pairwise" but this wraps FMINP;
    // corrected — the generator spec should be fixed upstream.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v8f16"
        )]
        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminq_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): summary said "add pairwise" but this wraps FMINNMP;
    // corrected — the generator spec should be fixed upstream.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
        )]
        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminnm_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): summary said "add pairwise" but this wraps FMINNMP;
    // corrected — the generator spec should be fixed upstream.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
        )]
        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminnmq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        // `fminnmv` across a two-lane vector reduces the pair to a scalar.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        // `fminnmv` across a two-lane vector reduces the pair to a scalar.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: direct call to the LLVM intrinsic; register values only.
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: argument and return types match the declared LLVM intrinsic
    // signature; the required CPU feature is enforced by #[target_feature].
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vpmins_f32(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: argument and return types match the declared LLVM intrinsic
    // signature; the required CPU feature is enforced by #[target_feature].
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    // Scalar form: splat `a` into a vector, run the vector op, take lane 0.
    // SAFETY: lane 0 is always in bounds for the 8-lane vector.
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    // Scalar form: splat `a` into a vector, run the vector op, take lane 0.
    // SAFETY: lane 0 is always in bounds for the 4-lane vector.
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vqabsd_s64(a) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands, use the vector op, take lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane 0 is always in bounds for the 8-lane vector.
    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, use the vector op, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane 0 is always in bounds for the 4-lane vector.
    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
    // Scalar form: splat both operands, use the vector op, take lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    // SAFETY: lane 0 is always in bounds for the 8-lane vector.
    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
    // Scalar form: splat both operands, use the vector op, take lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    // SAFETY: lane 0 is always in bounds for the 4-lane vector.
    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: argument and return types match the declared LLVM intrinsic
    // signature; the required CPU feature is enforced by #[target_feature].
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: types match the declared LLVM intrinsic signature.
    unsafe { _vqaddd_u64(a, b) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    // Widening doubling product of the upper half of `b` with lane N of `c`,
    // then saturating add onto the accumulator `a`.
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N must address one of the 8 lanes of `c`.
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N must address one of the 2 lanes of `c`.
    static_assert_uimm_bits!(N, 1);
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Scalar `c` is broadcast by the `_n_` multiply; result saturating-added to `a`.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N must address one of the 8 lanes of `c`.
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE must address one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE must address one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Scalar form built from the vector op: splat `b` and `c`, take lane 0 of
    // the widening doubling product, then saturating-add onto `a`.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // SAFETY: lane 0 is always in bounds for the 4-lane vector.
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
17126#[doc = "Signed saturating doubling multiply-add long"]
17127#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
17128#[inline(always)]
17129#[target_feature(enable = "neon")]
17130#[cfg_attr(test, assert_instr(sqdmlal))]
17131#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17132pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
17133    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
17134    x
17135}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    // Widening doubling product of the upper half of `b` with lane N of `c`,
    // then saturating subtract from the accumulator `a`.
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N must address one of the 8 lanes of `c`.
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N must address one of the 2 lanes of `c`.
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Scalar `c` is broadcast by the `_n_` multiply; result saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N must address one of the 8 lanes of `c`.
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE must address one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE must address one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE must address one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is validated by the static assert above, so the extract is in bounds.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Scalar form built from vector ops: broadcast both operands, do the
    // widening doubling multiply, then use lane 0 for the saturating subtract.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    // Saturating doubling multiply (i32 x i32 -> i64) then saturating subtract from `a`.
    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // LANE must fit in 2 bits (lanes 0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b` across a vector and multiply element-wise.
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // LANE must fit in 2 bits (lanes 0..=3 of the 64-bit `b`).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b` across a 128-bit vector and multiply element-wise.
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE must fit in 1 bit (lanes 0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` across a vector and multiply element-wise.
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // LANE must fit in 1 bit (lanes 0..=1 of the 64-bit `b`).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` across a 128-bit vector and multiply element-wise.
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    // N must fit in 2 bits (lanes 0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    // N must fit in 3 bits (lanes 0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar op built on the vector intrinsic: broadcast both scalars,
    // multiply, and take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar op built on the vector intrinsic: broadcast both scalars,
    // multiply, and take lane 0 of the result.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    // N must fit in 1 bit (lanes 0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    // N must fit in 2 bits (lanes 0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // N must fit in 2 bits (lanes 0..=3 of `b`).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Select the upper four lanes of `a` ...
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // ... broadcast lane N of `b` across four lanes ...
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        // ... and do the widening doubling multiply.
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // N must fit in 2 bits (lanes 0..=3 of the 128-bit `b`).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Upper two lanes of `a`, lane N of `b` broadcast, then widening multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // N must fit in 1 bit (lanes 0..=1 of the 64-bit `b`).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Upper two lanes of `a`, lane N of `b` broadcast, then widening multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // N must fit in 3 bits (lanes 0..=7 of the 128-bit `b`).
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Upper four lanes of `a`, lane N of `b` broadcast, then widening multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        // Upper four lanes of `a`, scalar `b` broadcast, then widening multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        // Upper two lanes of `a`, scalar `b` broadcast, then widening multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Take the upper four lanes of both operands, then widening multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Take the upper two lanes of both operands, then widening multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // N must fit in 3 bits (lanes 0..=7 of the 128-bit `b`).
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Broadcast lane N of `b` across four lanes, then widening multiply with `a`.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // N must fit in 2 bits (lanes 0..=3 of the 128-bit `b`).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Broadcast lane N of `b` across two lanes, then widening multiply with `a`.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // N must fit in 2 bits (lanes 0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // N must fit in 2 bits (lanes 0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // N must fit in 3 bits (lanes 0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // Scalar op built on the vector intrinsic: broadcast both scalars,
    // widening multiply, and take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // N must fit in 1 bit (lanes 0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract lane N and fall through to the scalar-scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // Unlike the s16 scalar variant, this binds directly to a scalar LLVM
    // intrinsic rather than going through a vector round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    // SAFETY: availability of the instruction is guaranteed by the
    // `#[target_feature(enable = "neon")]` gate on this function.
    unsafe { _vqdmulls_s32(a, b) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Narrow `b` with saturation, then concatenate: `a` fills lanes 0..=7,
    // the narrowed `b` fills lanes 8..=15.
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Narrow `b` with saturation, then concatenate with `a` (low half).
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Narrow `b` with saturation, then concatenate with `a` (low half).
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Unsigned variant: narrow `b` with saturation, then concatenate with `a`.
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Unsigned variant: narrow `b` with saturation, then concatenate with `a`.
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Unsigned variant: narrow `b` with saturation, then concatenate with `a`.
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    // Binds directly to the scalar LLVM intrinsic (i64 -> i32, signed saturating).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    // SAFETY: instruction availability is guaranteed by the `neon` feature gate.
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    // Binds directly to the scalar LLVM intrinsic (u64 -> u32, unsigned saturating).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    // SAFETY: instruction availability is guaranteed by the `neon` feature gate.
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    // Broadcast the scalar, narrow the whole vector, keep lane 0.
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    // Broadcast the scalar, narrow the whole vector, keep lane 0.
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    // Broadcast the scalar, narrow the whole vector, keep lane 0.
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    // Broadcast the scalar, narrow the whole vector, keep lane 0.
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Signed-to-unsigned saturating narrow of `b`, then concatenate:
    // `a` fills lanes 0..=7, the narrowed `b` fills lanes 8..=15.
    unsafe {
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Signed-to-unsigned saturating narrow of `b`, then concatenate with `a`.
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Signed-to-unsigned saturating narrow of `b`, then concatenate with `a`.
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    // Broadcast, signed-to-unsigned saturating narrow, keep lane 0.
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    // Broadcast, signed-to-unsigned saturating narrow, keep lane 0.
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    // Broadcast, signed-to-unsigned saturating narrow, keep lane 0.
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Binds directly to the LLVM intrinsic for the 1-lane i64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: instruction availability is guaranteed by the `neon` feature gate.
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Binds directly to the LLVM intrinsic for the 2-lane i64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: instruction availability is guaranteed by the `neon` feature gate.
    unsafe { _vqnegq_s64(a) }
}
17873#[doc = "Signed saturating negate"]
17874#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
17875#[inline(always)]
17876#[target_feature(enable = "neon")]
17877#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17878#[cfg_attr(test, assert_instr(sqneg))]
17879pub fn vqnegb_s8(a: i8) -> i8 {
17880    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
17881}
17882#[doc = "Signed saturating negate"]
17883#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
17884#[inline(always)]
17885#[target_feature(enable = "neon")]
17886#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17887#[cfg_attr(test, assert_instr(sqneg))]
17888pub fn vqnegh_s16(a: i16) -> i16 {
17889    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
17890}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    // Scalar form: broadcast `a` into a vector, apply the vector saturating
    // negate, and return lane 0.
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    // Scalar form: broadcast `a` into a vector, apply the vector saturating
    // negate, and return lane 0.
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` across all lanes, then defer to
        // the plain vector form of the operation.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE must fit in 1 bit: it selects one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the chosen lane of `c` across all lanes, then defer to
        // the plain vector form of the operation.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // LANE must fit in 3 bits: it selects one of the 8 lanes of the
    // quad-register `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the chosen lane of `c` into a 4-lane vector (narrowing
        // from 8 lanes), then defer to the plain vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of the
    // quad-register `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` into a 2-lane vector (narrowing
        // from 4 lanes), then defer to the plain vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of the 4-lane `c` into an 8-lane vector,
        // then defer to the plain vector form of the operation.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // LANE must fit in 1 bit: it selects one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the chosen lane of the 2-lane `c` into a 4-lane vector,
        // then defer to the plain vector form of the operation.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LANE must fit in 3 bits: it selects one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the chosen lane of `c` across all 8 lanes, then defer to
        // the plain vector form of the operation.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` across all 4 lanes, then defer to
        // the plain vector form of the operation.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLAH
    // instruction for four 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLAH
    // instruction for eight 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLAH
    // instruction for two 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLAH
    // instruction for four 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // LANE must fit in 3 bits: it selects one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // LANE must fit in 1 bit: it selects one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: broadcast each operand into a 4-lane vector, run the
    // vector operation, and return lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form: broadcast each operand into a 2-lane vector, run the
    // vector operation, and return lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` across all lanes, then defer to
        // the plain vector form of the operation.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE must fit in 1 bit: it selects one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the chosen lane of `c` across all lanes, then defer to
        // the plain vector form of the operation.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // LANE must fit in 3 bits: it selects one of the 8 lanes of the
    // quad-register `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the chosen lane of `c` into a 4-lane vector (narrowing
        // from 8 lanes), then defer to the plain vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of the
    // quad-register `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` into a 2-lane vector (narrowing
        // from 4 lanes), then defer to the plain vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of the 4-lane `c` into an 8-lane vector,
        // then defer to the plain vector form of the operation.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // LANE must fit in 1 bit: it selects one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the chosen lane of the 2-lane `c` into a 4-lane vector,
        // then defer to the plain vector form of the operation.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LANE must fit in 3 bits: it selects one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the chosen lane of `c` across all 8 lanes, then defer to
        // the plain vector form of the operation.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` across all 4 lanes, then defer to
        // the plain vector form of the operation.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLSH
    // instruction for four 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLSH
    // instruction for eight 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLSH
    // instruction for two 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // FFI binding to the LLVM intrinsic that lowers to the SQRDMLSH
    // instruction for four 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: availability of the intrinsic is guaranteed by the `rdm`
    // target feature enabled on this function.
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // LANE must fit in 3 bits: it selects one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // LANE must fit in 1 bit: it selects one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: broadcast each operand into a 4-lane vector, run the
    // vector operation, and return lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form: broadcast each operand into a 2-lane vector, run the
    // vector operation, and return lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `b`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `b` and defer to the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // LANE must fit in 3 bits: it selects one of the 8 lanes of `b`.
    static_assert_uimm_bits!(LANE, 3);
    // Extract the chosen lane of `b` and defer to the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // LANE must fit in 1 bit: it selects one of the 2 lanes of `b`.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the chosen lane of `b` and defer to the all-scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // LANE must fit in 2 bits: it selects one of the 4 lanes of `b`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane of `b` and defer to the all-scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: broadcast both operands into vectors, run the vector
    // operation, and return lane 0.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar form: broadcast both operands into vectors, run the vector
    // operation, and return lane 0.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: broadcast the value and the shift amount into vectors,
    // run the vector shift, and return lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Splat value and shift amount into vectors for the vector intrinsic.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // All lanes hold the same result; lane 0 is the scalar answer.
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Unsigned value, signed shift amount — matching the uqrshl operand types.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // All lanes hold the same result; lane 0 is the scalar answer.
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Unsigned value, signed shift amount — matching the uqrshl operand types.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // All lanes hold the same result; lane 0 is the scalar answer.
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // LLVM exposes a scalar i64 form directly, so no vector round-trip is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // LLVM exposes a scalar i32 form directly, so no vector round-trip is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Unsigned value, signed shift amount; binds to the scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned value, signed shift amount; binds to the scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount is restricted to 1..=8 for 16-bit source lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b`, then concatenate: indices 0..=7 pick `a` (low half),
        // 8..=15 pick the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount is restricted to 1..=16 for 32-bit source lanes.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount is restricted to 1..=32 for 64-bit source lanes.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount is restricted to 1..=8 for 16-bit source lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b`, then concatenate: indices 0..=7 pick `a` (low half),
        // 8..=15 pick the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount is restricted to 1..=16 for 32-bit source lanes.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount is restricted to 1..=32 for 64-bit source lanes.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Shift amount is restricted to 1..=32 for a 64-bit source.
    static_assert!(N >= 1 && N <= 32);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Shift amount is restricted to 1..=8 for a 16-bit source.
    static_assert!(N >= 1 && N <= 8);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Shift amount is restricted to 1..=16 for a 32-bit source.
    static_assert!(N >= 1 && N <= 16);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Shift amount is restricted to 1..=8 for a 16-bit source.
    static_assert!(N >= 1 && N <= 8);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Shift amount is restricted to 1..=16 for a 32-bit source.
    static_assert!(N >= 1 && N <= 16);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Shift amount is restricted to 1..=32 for a 64-bit source.
    static_assert!(N >= 1 && N <= 32);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift amount is restricted to 1..=8 for 16-bit source lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow signed `b` to unsigned, then concatenate: indices 0..=7 pick
        // `a` (low half), 8..=15 pick the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount is restricted to 1..=16 for 32-bit source lanes.
    static_assert!(N >= 1 && N <= 16);
    // Narrow signed `b` to unsigned and concatenate it after `a`.
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount is restricted to 1..=32 for 64-bit source lanes.
    static_assert!(N >= 1 && N <= 32);
    // Narrow signed `b` to unsigned and concatenate it after `a`.
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount is restricted to 1..=32 for a 64-bit source.
    static_assert!(N >= 1 && N <= 32);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount is restricted to 1..=8 for a 16-bit source.
    static_assert!(N >= 1 && N <= 8);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount is restricted to 1..=16 for a 32-bit source.
    static_assert!(N >= 1 && N <= 16);
    // Splat into a vector, run the vector narrowing shift, take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    // 3-bit immediate: shift amount 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    // 6-bit immediate: shift amount 0..=63 for 64-bit lanes.
    static_assert_uimm_bits!(N, 6);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    // 4-bit immediate: shift amount 0..=15 for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    // 5-bit immediate: shift amount 0..=31 for 32-bit lanes.
    static_assert_uimm_bits!(N, 5);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    // 3-bit immediate: shift amount 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    // 6-bit immediate: shift amount 0..=63 for 64-bit lanes.
    static_assert_uimm_bits!(N, 6);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    // 4-bit immediate: shift amount 0..=15 for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    // 5-bit immediate: shift amount 0..=31 for 32-bit lanes.
    static_assert_uimm_bits!(N, 5);
    // Splat, run the vector immediate shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Splat both scalars, run the vector shift, take lane 0 as the result.
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars, run the vector shift, take lane 0 as the result.
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    // Splat both scalars, run the vector shift, take lane 0 as the result.
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // Unsigned value, signed shift amount; splat both, then take lane 0.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    // Unsigned value, signed shift amount; splat both, then take lane 0.
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    // Unsigned value, signed shift amount; splat both, then take lane 0.
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // LLVM exposes a scalar i64 form directly, so no vector round-trip is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned value, signed shift amount; binds to the scalar LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    // 3-bit immediate: shift amount 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Splat, run the vector signed-to-unsigned saturating shift, take lane 0.
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    // 6-bit immediate: shift amount 0..=63 for 64-bit lanes.
    static_assert_uimm_bits!(N, 6);
    // Splat, run the vector signed-to-unsigned saturating shift, take lane 0.
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    // 4-bit immediate: shift amount 0..=15 for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Splat, run the vector signed-to-unsigned saturating shift, take lane 0.
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    // 5-bit immediate: shift amount 0..=31 for 32-bit lanes.
    static_assert_uimm_bits!(N, 5);
    // Splat, run the vector signed-to-unsigned saturating shift, take lane 0.
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount is restricted to 1..=8 for 16-bit source lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b`, then concatenate: indices 0..=7 pick `a` (low half),
        // 8..=15 pick the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount is restricted to 1..=16 for 32-bit source lanes.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount is restricted to 1..=32 for 64-bit source lanes.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount is restricted to 1..=8 for 16-bit source lanes.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b`, then concatenate: indices 0..=7 pick `a` (low half),
        // 8..=15 pick the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount is restricted to 1..=16 for 32-bit source lanes.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount is restricted to 1..=32 for 64-bit source lanes.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` and concatenate after `a`: low half = `a`, high half = narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Shift amount is restricted to 1..=32 for a 64-bit source.
    static_assert!(N >= 1 && N <= 32);
    // The scalar LLVM intrinsic takes the (compile-time validated) shift
    // amount as a runtime argument.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Shift amount is limited to the 32-bit destination width.
    static_assert!(N >= 1 && N <= 32);
    // Direct binding to the scalar LLVM intrinsic; resolved only on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    // SAFETY: purely arithmetic intrinsic over scalar values; N is range-checked above.
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Shift amount is limited to the 8-bit destination width.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, run the vector narrow, then read back lane 0.
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Shift amount is limited to the 16-bit destination width.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, run the vector narrow, then read back lane 0.
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Shift amount is limited to the 8-bit destination width.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, run the vector narrow, then read back lane 0.
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Shift amount is limited to the 16-bit destination width.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, run the vector narrow, then read back lane 0.
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift amount is limited to the 8-bit destination element width.
    static_assert!(N >= 1 && N <= 8);
    // Keep `a` as the low 8 lanes and append the narrowed `b` as the high 8 lanes.
    unsafe {
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount is limited to the 16-bit destination element width.
    static_assert!(N >= 1 && N <= 16);
    // Keep `a` as the low 4 lanes and append the narrowed `b` as the high 4 lanes.
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount is limited to the 32-bit destination element width.
    static_assert!(N >= 1 && N <= 32);
    // Keep `a` as the low 2 lanes and append the narrowed `b` as the high 2 lanes.
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount is limited to the 32-bit destination width.
    static_assert!(N >= 1 && N <= 32);
    // Broadcast the scalar, run the vector narrow, then read back lane 0.
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount is limited to the 8-bit destination width.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, run the vector narrow, then read back lane 0.
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount is limited to the 16-bit destination width.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, run the vector narrow, then read back lane 0.
    // SAFETY: lane 0 always exists in the duplicated vector.
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Implemented by widening both scalars to vectors, doing a vector
    // saturating subtract, and extracting lane 0 of the result.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane 0 always exists in an 8-lane vector.
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Implemented by widening both scalars to vectors, doing a vector
    // saturating subtract, and extracting lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane 0 always exists in a 4-lane vector.
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Implemented by widening both scalars to vectors, doing a vector
    // saturating subtract, and extracting lane 0 of the result.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    // SAFETY: lane 0 always exists in an 8-lane vector.
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Implemented by widening both scalars to vectors, doing a vector
    // saturating subtract, and extracting lane 0 of the result.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    // SAFETY: lane 0 always exists in a 4-lane vector.
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // Direct binding to the scalar LLVM intrinsic; resolved only on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: purely arithmetic intrinsic over scalar values; no memory access.
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    // Direct binding to the scalar LLVM intrinsic; resolved only on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: purely arithmetic intrinsic over scalar values; no memory access.
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    // Direct binding to the scalar LLVM intrinsic; resolved only on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: purely arithmetic intrinsic over scalar values; no memory access.
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    // Direct binding to the scalar LLVM intrinsic; resolved only on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: purely arithmetic intrinsic over scalar values; no memory access.
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Private untyped helper: binds the LLVM TBL1 intrinsic for the s8/u8/p8
    // public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Private untyped helper: binds the LLVM TBL1 intrinsic (16-lane index form)
    // for the s8/u8/p8 public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // The signed variant maps directly onto the untyped helper.
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // The signed variant maps directly onto the untyped helper.
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private untyped helper: binds the LLVM TBL2 intrinsic (two-table lookup)
    // for the s8/u8/p8 public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private untyped helper: binds the LLVM TBL2 intrinsic (16-lane index form)
    // for the s8/u8/p8 public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the two-table tuple into the untyped helper's flat argument list.
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the two-table tuple into the untyped helper's flat argument list.
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private untyped helper: binds the LLVM TBL3 intrinsic (three-table lookup)
    // for the s8/u8/p8 public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private untyped helper: binds the LLVM TBL3 intrinsic (16-lane index form)
    // for the s8/u8/p8 public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the three-table tuple into the untyped helper's flat argument list.
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the three-table tuple into the untyped helper's flat argument list.
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private untyped helper: binds the LLVM TBL4 intrinsic (four-table lookup)
    // for the s8/u8/p8 public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private untyped helper: binds the LLVM TBL4 intrinsic (16-lane index form)
    // for the s8/u8/p8 public wrappers in this file.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the four-table tuple into the untyped helper's flat argument list.
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the four-table tuple into the untyped helper's flat argument list.
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private untyped helper: binds the LLVM TBX1 intrinsic (extension form of
    // TBL1, where `a` supplies fallback lanes) for the public wrappers below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private untyped helper: binds the LLVM TBX1 intrinsic (16-lane index form)
    // for the public wrappers below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: register-only table lookup; no memory access or preconditions.
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // The signed variant maps directly onto the untyped helper.
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // The signed variant maps directly onto the untyped helper.
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: uint8 and int8 vectors have the same size, so transmute is a bit-cast.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: u8 and i8 vectors of the same lane count are identically sized,
    // so each transmute is a lane-wise bit cast around the signed helper.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding of the LLVM `tbx2` intrinsic (two-register table, 8-byte
    // result); the type-specific public wrappers delegate to it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the TBX
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private binding of the LLVM `tbx2` intrinsic (two-register table,
    // 16-byte result); the type-specific public wrappers delegate to it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the TBX
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table struct into the flat helper signature.
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the two-register table struct into the flat helper signature.
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: u8 and i8 vectors of the same lane count are identically sized,
    // so each transmute is a lane-wise bit cast around the signed helper.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: u8 and i8 vectors of the same lane count are identically sized,
    // so each transmute is a lane-wise bit cast around the signed helper.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private binding of the LLVM `tbx3` intrinsic (three-register table,
    // 8-byte result); the type-specific public wrappers delegate to it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the TBX
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private binding of the LLVM `tbx3` intrinsic (three-register table,
    // 16-byte result); the type-specific public wrappers delegate to it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the TBX
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the three-register table struct into the flat helper signature.
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the three-register table struct into the flat helper signature.
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: u8 and i8 vectors of the same lane count are identically sized,
    // so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: u8 and i8 vectors of the same lane count are identically sized,
    // so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    // Private binding of the LLVM `tbx4` intrinsic (four-register table,
    // 8-byte result); the type-specific public wrappers delegate to it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the TBX
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    // Private binding of the LLVM `tbx4` intrinsic (four-register table,
    // 16-byte result); the type-specific public wrappers delegate to it.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the TBX
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the four-register table struct into the flat helper signature.
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the four-register table struct into the flat helper signature.
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: u8 and i8 vectors of the same lane count are identically sized,
    // so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: u8 and i8 vectors of the same lane count are identically sized,
    // so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: poly8 and i8 vectors of the same lane count are identically
    // sized, so each transmute is a lane-wise bit cast around the signed helper.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Direct binding of the LLVM SHA3 `rax1` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: this fn is gated on `neon,sha3`, so the RAX1 instruction is
    // available; the intrinsic only reads its by-value vector arguments.
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // SAFETY: `simd_bitreverse` is a pure lane-wise operation on an integer
    // vector; no memory is accessed.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // SAFETY: `simd_bitreverse` is a pure lane-wise operation on an integer
    // vector; no memory is accessed.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Little-endian variant: a plain bit cast around the signed helper.
    // SAFETY: uint8x8_t and int8x8_t are identically sized vector types.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lanes are reversed before and after the lane-wise
    // op to compensate for the big-endian lane-ordering convention used by
    // this module's transmute-based casts.
    // SAFETY: the shuffle only permutes lanes of the same vector.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: uint8x8_t and int8x8_t are identically sized vector types, and
    // the shuffle only permutes lanes.
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Little-endian variant: a plain bit cast around the signed helper.
    // SAFETY: uint8x16_t and int8x16_t are identically sized vector types.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: lanes are reversed before and after the lane-wise
    // op to compensate for the big-endian lane-ordering convention used by
    // this module's transmute-based casts.
    // SAFETY: the shuffle only permutes lanes of the same vector.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: uint8x16_t and int8x16_t are identically sized vector types, and
    // the shuffle only permutes lanes.
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Little-endian variant: a plain bit cast around the signed helper.
    // SAFETY: poly8x8_t and int8x8_t are identically sized vector types.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Big-endian variant: lanes are reversed before and after the lane-wise
    // op to compensate for the big-endian lane-ordering convention used by
    // this module's transmute-based casts.
    // SAFETY: the shuffle only permutes lanes of the same vector.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: poly8x8_t and int8x8_t are identically sized vector types, and
    // the shuffle only permutes lanes.
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Little-endian variant: a plain bit cast around the signed helper.
    // SAFETY: poly8x16_t and int8x16_t are identically sized vector types.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Big-endian variant: lanes are reversed before and after the lane-wise
    // op to compensate for the big-endian lane-ordering convention used by
    // this module's transmute-based casts.
    // SAFETY: the shuffle only permutes lanes of the same vector.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: poly8x16_t and int8x16_t are identically sized vector types, and
    // the shuffle only permutes lanes.
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    // Direct binding of the LLVM `frecpe` intrinsic (1 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPE
    // instruction is available; the intrinsic only reads its by-value arg.
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding of the LLVM `frecpe` intrinsic (2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPE
    // instruction is available; the intrinsic only reads its by-value arg.
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    // Direct binding of the LLVM `frecpe` intrinsic (scalar f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPE
    // instruction is available; the intrinsic only reads its by-value arg.
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    // Direct binding of the LLVM `frecpe` intrinsic (scalar f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPE
    // instruction is available; the intrinsic only reads its by-value arg.
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpeh_f16(a: f16) -> f16 {
    // Direct binding of the LLVM `frecpe` intrinsic (scalar f16); not built
    // for arm64ec (see the `cfg` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    // SAFETY: this fn is gated on `neon,fp16`, so the FRECPE instruction is
    // available; the intrinsic only reads its by-value arg.
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding of the LLVM `frecps` intrinsic (1 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPS
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Direct binding of the LLVM `frecps` intrinsic (2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPS
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    // Direct binding of the LLVM `frecps` intrinsic (scalar f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPS
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    // Direct binding of the LLVM `frecps` intrinsic (scalar f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPS
    // instruction is available; the intrinsic only reads its by-value args.
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    // Direct binding of the LLVM `frecps` intrinsic (scalar f16); not built
    // for arm64ec (see the `cfg` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: this fn is gated on `neon,fp16`, so the FRECPS instruction is
    // available; the intrinsic only reads its by-value args.
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    // Direct binding of the LLVM `frecpx` intrinsic (scalar f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPX
    // instruction is available; the intrinsic only reads its by-value arg.
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    // Direct binding of the LLVM `frecpx` intrinsic (scalar f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    // SAFETY: this fn is gated on the `neon` target feature, so the FRECPX
    // instruction is available; the intrinsic only reads its by-value arg.
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpxh_f16(a: f16) -> f16 {
    // Direct binding of the LLVM `frecpx` intrinsic (scalar f16); not built
    // for arm64ec (see the `cfg` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    // SAFETY: this fn is gated on `neon,fp16`, so the FRECPX instruction is
    // available; the intrinsic only reads its by-value arg.
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Little-endian: in-register lane order matches memory order, so the
    // cast is a pure bit-level reinterpret (compiles to no instruction).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Big-endian: reverse the four f16 input lanes before the transmute so
    // the bit pattern matches architectural lane numbering. The single-lane
    // f64x1 result needs no reversal.
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Little-endian: plain bit-level reinterpret of the 128-bit register.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Big-endian: reverse the eight f16 input lanes, transmute, then reverse
    // the two f64 output lanes — both sides are multi-lane here.
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Big-endian: the single-lane f64x1 input needs no reversal; only the
    // four f16 output lanes are reversed after the transmute.
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 8 f16 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Little-endian: plain bit-level reinterpret of the 128-bit value.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Big-endian: p128 is a scalar (no lanes), so only the two f64 output
    // lanes are reversed after the transmute.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Big-endian: reverse the two f32 input lanes; the single-lane f64x1
    // result needs no reversal.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Big-endian: reverse the two f32 input lanes; single-lane poly64x1
    // output needs no reversal.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Big-endian: reverse both multi-lane sides (4 f32 in, 2 f64 out).
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Big-endian: reverse both multi-lane sides (4 f32 in, 2 p64 out).
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Big-endian: single-lane f64x1 input needs no reversal; only the two
    // f32 output lanes are reversed after the transmute.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Big-endian: single-lane input needs no reversal; reverse the eight
    // i8 output lanes after the transmute.
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Big-endian: reverse the four i16 output lanes after the transmute.
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Big-endian: reverse the two i32 output lanes after the transmute.
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    // Both sides are single-lane 64-bit vectors, so no endian-dependent lane
    // reversal is needed — one definition covers both byte orders.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Big-endian: single-lane input needs no reversal; reverse the eight
    // u8 output lanes after the transmute.
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Big-endian: reverse the four u16 output lanes after the transmute.
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Big-endian: reverse the two u32 output lanes after the transmute.
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Both sides are single-lane 64-bit vectors: no endian-specific variant
    // is required, hence no `cfg(target_endian)` on this definition.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // Big-endian: single-lane input needs no reversal; reverse the eight
    // p8 output lanes after the transmute.
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // Big-endian: reverse the four p16 output lanes after the transmute.
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    // Single-lane to single-lane: endianness-independent, one definition.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Little-endian: plain bit-level reinterpret to the 128-bit poly scalar.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Big-endian: reverse the two f64 input lanes; the scalar p128 result
    // has no lanes to reverse.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 4 f32 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 16 i8 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 8 i16 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 4 i32 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Big-endian: both sides are 2 x 64-bit lanes; reverse input lanes,
    // transmute, and reverse output lanes per the generator's pattern.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 16 u8 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 8 u16 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Big-endian: reverse both multi-lane sides (2 f64 in, 4 u32 out).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Little-endian: plain bit-level reinterpret.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Big-endian: both sides are 2 x 64-bit lanes; reverse input lanes,
    // transmute, and reverse output lanes per the generator's pattern.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
21292#[doc = "Vector reinterpret cast operation"]
21293#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
21294#[inline(always)]
21295#[cfg(target_endian = "little")]
21296#[target_feature(enable = "neon")]
21297#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21298#[cfg_attr(test, assert_instr(nop))]
21299pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
21300    unsafe { transmute(a) }
21301}
21302#[doc = "Vector reinterpret cast operation"]
21303#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
21304#[inline(always)]
21305#[cfg(target_endian = "big")]
21306#[target_feature(enable = "neon")]
21307#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21308#[cfg_attr(test, assert_instr(nop))]
21309pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
21310    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21311    unsafe {
21312        let ret_val: poly8x16_t = transmute(a);
21313        simd_shuffle!(
21314            ret_val,
21315            ret_val,
21316            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21317        )
21318    }
21319}
21320#[doc = "Vector reinterpret cast operation"]
21321#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
21322#[inline(always)]
21323#[cfg(target_endian = "little")]
21324#[target_feature(enable = "neon")]
21325#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21326#[cfg_attr(test, assert_instr(nop))]
21327pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
21328    unsafe { transmute(a) }
21329}
21330#[doc = "Vector reinterpret cast operation"]
21331#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
21332#[inline(always)]
21333#[cfg(target_endian = "big")]
21334#[target_feature(enable = "neon")]
21335#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21336#[cfg_attr(test, assert_instr(nop))]
21337pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
21338    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21339    unsafe {
21340        let ret_val: poly16x8_t = transmute(a);
21341        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
21342    }
21343}
21344#[doc = "Vector reinterpret cast operation"]
21345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
21346#[inline(always)]
21347#[cfg(target_endian = "little")]
21348#[target_feature(enable = "neon")]
21349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21350#[cfg_attr(test, assert_instr(nop))]
21351pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
21352    unsafe { transmute(a) }
21353}
21354#[doc = "Vector reinterpret cast operation"]
21355#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
21356#[inline(always)]
21357#[cfg(target_endian = "big")]
21358#[target_feature(enable = "neon")]
21359#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21360#[cfg_attr(test, assert_instr(nop))]
21361pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
21362    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21363    unsafe {
21364        let ret_val: poly64x2_t = transmute(a);
21365        simd_shuffle!(ret_val, ret_val, [1, 0])
21366    }
21367}
// --- Reinterpret signed-integer vectors (s8/s16/s32) as float64 vectors ---
// NOTE(generated code): pure bit-casts; must fold to `nop`. Big-endian
// variants reverse the source lanes before the cast; the 64-bit `vreinterpret_*`
// results (float64x1_t) are single-lane, so no output shuffle is needed there,
// while the 128-bit `vreinterpretq_*` results get a final [1, 0] reversal.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// --- Reinterpret s64 vectors as f64/p64 vectors ---
// NOTE(generated code): single-lane 64-bit casts need no endianness handling
// (one lane, no lane order), so those have no `#[cfg(target_endian)]` split.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// --- Reinterpret unsigned-integer vectors (u8/u16/u32) as float64 vectors ---
// NOTE(generated code): same pattern as the signed variants above — a pure
// `transmute` (folds to `nop`), with big-endian lane reversal on the input and,
// for the multi-lane 128-bit results, on the output.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// --- Reinterpret u64 vectors as f64/p64 vectors ---
// NOTE(generated code): single-lane 64-bit casts are endian-neutral (no
// `#[cfg(target_endian)]` split); the 128-bit variants reverse the two 64-bit
// lanes on big-endian targets.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// --- Reinterpret polynomial vectors (p8/p16) as float64 vectors ---
// NOTE(generated code): same bit-cast pattern as the integer sources above;
// big-endian variants reverse the source lanes, and the two-lane f64 results
// additionally get a final [1, 0] reversal.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
// --- Reinterpret p64 vectors as float/integer vectors ---
// NOTE(generated code): a single-lane poly64x1_t source needs no input shuffle
// on big-endian targets (one lane), but a multi-lane result (e.g. float32x2_t)
// still gets its lanes reversed after the cast.
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
21947#[doc = "Vector reinterpret cast operation"]
21948#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
21949#[inline(always)]
21950#[cfg(target_endian = "big")]
21951#[target_feature(enable = "neon")]
21952#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21953#[cfg_attr(test, assert_instr(nop))]
21954pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
21955    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21956    unsafe {
21957        let ret_val: float64x2_t = transmute(a);
21958        simd_shuffle!(ret_val, ret_val, [1, 0])
21959    }
21960}
21961#[doc = "Vector reinterpret cast operation"]
21962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
21963#[inline(always)]
21964#[cfg(target_endian = "little")]
21965#[target_feature(enable = "neon")]
21966#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21967#[cfg_attr(test, assert_instr(nop))]
21968pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
21969    unsafe { transmute(a) }
21970}
21971#[doc = "Vector reinterpret cast operation"]
21972#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
21973#[inline(always)]
21974#[cfg(target_endian = "big")]
21975#[target_feature(enable = "neon")]
21976#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21977#[cfg_attr(test, assert_instr(nop))]
21978pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
21979    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21980    unsafe {
21981        let ret_val: int64x2_t = transmute(a);
21982        simd_shuffle!(ret_val, ret_val, [1, 0])
21983    }
21984}
21985#[doc = "Vector reinterpret cast operation"]
21986#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
21987#[inline(always)]
21988#[cfg(target_endian = "little")]
21989#[target_feature(enable = "neon")]
21990#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21991#[cfg_attr(test, assert_instr(nop))]
21992pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
21993    unsafe { transmute(a) }
21994}
21995#[doc = "Vector reinterpret cast operation"]
21996#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
21997#[inline(always)]
21998#[cfg(target_endian = "big")]
21999#[target_feature(enable = "neon")]
22000#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22001#[cfg_attr(test, assert_instr(nop))]
22002pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
22003    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22004    unsafe {
22005        let ret_val: uint64x2_t = transmute(a);
22006        simd_shuffle!(ret_val, ret_val, [1, 0])
22007    }
22008}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
// Thin FFI wrapper over the LLVM intrinsic `llvm.aarch64.neon.frint32x.v2f32`;
// expected to lower to a single FRINT32X instruction (asserted in tests).
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
// 128-bit (4 x f32) variant of vrnd32x; delegates to the `.v4f32` intrinsic.
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
// 128-bit (2 x f64) variant; delegates to the `.v2f64` intrinsic.
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
// One-lane f64 variant: there is no vector form of this LLVM intrinsic for
// v1f64, so lane 0 is extracted, the scalar `llvm.aarch64.frint32x.f64` is
// called, and the scalar result is transmuted back into a 1-lane vector.
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
// FFI wrapper over `llvm.aarch64.neon.frint32z.v2f32` (FRINT32Z: round to a
// value representable as a 32-bit integer, toward zero).
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
// 128-bit (4 x f32) variant; delegates to the `.v4f32` intrinsic.
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
// 128-bit (2 x f64) variant; delegates to the `.v2f64` intrinsic.
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
// One-lane f64 variant: extracts lane 0, calls the scalar
// `llvm.aarch64.frint32z.f64`, and transmutes back into a 1-lane vector.
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
// FFI wrapper over `llvm.aarch64.neon.frint64x.v2f32` (FRINT64X: round to a
// value representable as a 64-bit integer, current rounding mode).
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
// 128-bit (4 x f32) variant; delegates to the `.v4f32` intrinsic.
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
// 128-bit (2 x f64) variant; delegates to the `.v2f64` intrinsic.
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
// One-lane f64 variant: extracts lane 0, calls the scalar
// `llvm.aarch64.frint64x.f64`, and transmutes back into a 1-lane vector.
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
// FFI wrapper over `llvm.aarch64.neon.frint64z.v2f32` (FRINT64Z: round to a
// value representable as a 64-bit integer, toward zero).
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
// 128-bit (4 x f32) variant; delegates to the `.v4f32` intrinsic.
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
// 128-bit (2 x f64) variant; delegates to the `.v2f64` intrinsic.
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
// One-lane f64 variant: extracts lane 0, calls the scalar
// `llvm.aarch64.frint64z.f64`, and transmutes back into a 1-lane vector.
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
// Per-lane truncation (round toward zero) via the portable simd_trunc
// intrinsic; lowers to FRINTZ (asserted in tests).
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
// 128-bit (8 x f16) truncation variant.
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
// 64-bit (2 x f32) truncation variant.
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
// 128-bit (4 x f32) truncation variant.
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
// One-lane f64 truncation variant.
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
// 128-bit (2 x f64) truncation variant.
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
// Per-lane round-half-away-from-zero via the portable simd_round intrinsic;
// lowers to FRINTA (asserted in tests).
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
// 128-bit (8 x f16) round-to-away variant.
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
// 64-bit (2 x f32) round-to-away variant.
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
// 128-bit (4 x f32) round-to-away variant.
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
// One-lane f64 round-to-away variant.
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
// 128-bit (2 x f64) round-to-away variant.
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
// Scalar f16 round-half-away-from-zero; delegates to the roundf16 helper
// (defined elsewhere in core_arch) and lowers to FRINTA.
pub fn vrndah_f16(a: f16) -> f16 {
    roundf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
// Scalar f16 truncation (round toward zero); delegates to the truncf16
// helper and lowers to FRINTZ.
pub fn vrndh_f16(a: f16) -> f16 {
    truncf16(a)
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
// FFI wrapper over the generic LLVM `llvm.nearbyint.v4f16` intrinsic
// (round using the current FPCR rounding mode, no inexact exception);
// lowers to FRINTI (asserted in tests).
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
// 128-bit (8 x f16) variant; delegates to `llvm.nearbyint.v8f16`.
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
// 64-bit (2 x f32) variant; delegates to `llvm.nearbyint.v2f32`.
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
// 128-bit (4 x f32) variant; delegates to `llvm.nearbyint.v4f32`.
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
// One-lane f64 variant; here a `.v1f64` vector form of the intrinsic exists,
// so no scalar extract/transmute round-trip is needed.
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
// 128-bit (2 x f64) variant; delegates to `llvm.nearbyint.v2f64`.
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
// Scalar f16 variant; delegates to the scalar `llvm.nearbyint.f16`.
pub fn vrndih_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
// Per-lane floor (round toward -inf) via the portable simd_floor intrinsic;
// lowers to FRINTM (asserted in tests).
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
// 128-bit (8 x f16) floor variant.
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
// 64-bit (2 x f32) floor variant.
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
// 128-bit (4 x f32) floor variant.
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
// One-lane f64 floor variant.
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
// 128-bit (2 x f64) floor variant.
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
// Scalar f16 floor; delegates to the floorf16 helper (defined elsewhere in
// core_arch) and lowers to FRINTM.
pub fn vrndmh_f16(a: f16) -> f16 {
    floorf16(a)
}
22578#[doc = "Floating-point round to integral, to nearest with ties to even"]
22579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
22580#[inline(always)]
22581#[target_feature(enable = "neon")]
22582#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22583#[cfg_attr(test, assert_instr(frintn))]
22584pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
22585    unsafe extern "unadjusted" {
22586        #[cfg_attr(
22587            any(target_arch = "aarch64", target_arch = "arm64ec"),
22588            link_name = "llvm.roundeven.v1f64"
22589        )]
22590        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
22591    }
22592    unsafe { _vrndn_f64(a) }
22593}
22594#[doc = "Floating-point round to integral, to nearest with ties to even"]
22595#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
22596#[inline(always)]
22597#[target_feature(enable = "neon")]
22598#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22599#[cfg_attr(test, assert_instr(frintn))]
22600pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
22601    unsafe extern "unadjusted" {
22602        #[cfg_attr(
22603            any(target_arch = "aarch64", target_arch = "arm64ec"),
22604            link_name = "llvm.roundeven.v2f64"
22605        )]
22606        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
22607    }
22608    unsafe { _vrndnq_f64(a) }
22609}
22610#[doc = "Floating-point round to integral, toward minus infinity"]
22611#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
22612#[inline(always)]
22613#[target_feature(enable = "neon,fp16")]
22614#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22615#[cfg(not(target_arch = "arm64ec"))]
22616#[cfg_attr(test, assert_instr(frintn))]
22617pub fn vrndnh_f16(a: f16) -> f16 {
22618    unsafe extern "unadjusted" {
22619        #[cfg_attr(
22620            any(target_arch = "aarch64", target_arch = "arm64ec"),
22621            link_name = "llvm.roundeven.f16"
22622        )]
22623        fn _vrndnh_f16(a: f16) -> f16;
22624    }
22625    unsafe { _vrndnh_f16(a) }
22626}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    // Scalar round-to-nearest-even via `llvm.roundeven.f32`; lowers to FRINTN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // Element-wise ceiling; `simd_ceil` lowers to FRINTP (asserted above).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (quad) variant of `vrndp_f16`.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // Two-lane f32 ceiling.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // Four-lane f32 ceiling.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane f64 ceiling.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 ceiling.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar half-precision ceiling via the `ceilf16` core intrinsic (safe call).
    ceilf16(a)
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    // `simd_round_ties_even` lowers to FRINTX here (asserted above).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (quad) variant of `vrndx_f16`.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // Two-lane f32 variant.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // Four-lane f32 variant.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane f64 variant.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 variant.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxh_f16(a: f16) -> f16 {
    // NOTE(review): the doc summary here omits the word "exact" that the sibling
    // vrndx_* entries use, even though the same FRINTX instruction is asserted —
    // presumably just the spec's wording; confirm against ACLE before changing.
    // Scalar half-precision variant via the `round_ties_even_f16` core intrinsic.
    round_ties_even_f16(a)
}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    // Scalar (doubleword) rounding shift left; a negative `b` shifts right with
    // rounding, which is how the vrshrd_n_* wrappers below use this.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned counterpart of `vrshld_s64`; the shift amount `b` stays signed
    // so negative values still mean "shift right with rounding".
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // N must be 1..=64 (instruction's immediate range), checked at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Rounding shift right = rounding shift left by a negative amount.
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    // Same immediate range and negative-shift trick as the signed variant.
    static_assert!(N >= 1 && N <= 64);
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Narrow `b` by N with rounding, then concatenate `a` (low half) with the
    // narrowed result (high half) via an identity-order shuffle.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // 32->16 narrowing variant; shift immediate limited to the source lane width.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // 64->32 narrowing variant.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Unsigned 16->8 narrowing variant; same structure as the signed version.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Unsigned 32->16 narrowing variant.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Unsigned 64->32 narrowing variant.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    // Hardware estimate of 1/sqrt(a); binds the FRSQRTE intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    // Two-lane f64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    // Scalar f64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    // Scalar f32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    // Scalar half-precision variant; requires the fp16 target feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // FRSQRTS Newton-Raphson step, used to refine vrsqrte_* estimates.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Two-lane f64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    // Scalar f64 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    // Scalar f32 variant.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision variant; requires the fp16 target feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Rounding-shift `b` right by N, then accumulate into `a`.
    static_assert!(N >= 1 && N <= 64);
    let b: i64 = vrshrd_n_s64::<N>(b);
    // wrapping_add: the accumulate is modular, matching the instruction's
    // two's-complement addition (no overflow panic in debug builds).
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Unsigned counterpart of `vrsrad_n_s64`.
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
// Each vrsubhn_high_* intrinsic is defined twice below — once for little-endian
// and once for big-endian targets. The Rust bodies are identical; only the
// instruction expected by `assert_instr` differs (rsubhn2 vs rsubhn),
// presumably because the concatenating shuffle lowers differently on
// big-endian — TODO confirm against codegen.
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Narrow (b - c) with rounding, then concatenate `a` with the result.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // 32->16 narrowing variant.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // 64->32 narrowing variant.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Unsigned 16->8 narrowing variant.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Unsigned 32->16 narrowing variant.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Unsigned 64->32 narrowing variant.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Big-endian copy of the definition above; only `assert_instr` differs.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Big-endian copy of the definition above.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Big-endian copy of the definition above.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Big-endian copy of the definition above.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Big-endian copy of the definition above.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Big-endian copy of the definition above.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t {
    // Thin binding to LLVM's fscale intrinsic for the 4 x f16 shape: each lane
    // of `vn` has its exponent adjusted by the corresponding lane of `vm`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f16"
        )]
        fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t;
    }
    unsafe { _vscale_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t {
    // Thin binding to LLVM's fscale intrinsic for the 8 x f16 shape: each lane
    // of `vn` has its exponent adjusted by the corresponding lane of `vm`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v8f16"
        )]
        fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t;
    }
    unsafe { _vscaleq_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t {
    // Thin binding to LLVM's fscale intrinsic for the 2 x f32 shape: each lane
    // of `vn` has its exponent adjusted by the corresponding lane of `vm`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f32"
        )]
        fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t;
    }
    unsafe { _vscale_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t {
    // Thin binding to LLVM's fscale intrinsic for the 4 x f32 shape: each lane
    // of `vn` has its exponent adjusted by the corresponding lane of `vm`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f32"
        )]
        fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t;
    }
    unsafe { _vscaleq_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t {
    // Thin binding to LLVM's fscale intrinsic for the 2 x f64 shape: each lane
    // of `vn` has its exponent adjusted by the corresponding lane of `vm`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
        )]
        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
    }
    unsafe { _vscaleq_f64(vn, vm) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // The destination is a single-lane vector, so LANE must be exactly 0.
    static_assert!(LANE == 0);
    // Replace lane LANE of `b` with the scalar `a`.
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // Two-lane destination: LANE must fit in 1 unsigned bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Replace lane LANE of `b` with the scalar `a`.
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the AArch64 SHA512H2 crypto intrinsic (requires sha3).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the AArch64 SHA512H crypto intrinsic (requires sha3).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the AArch64 SHA512SU0 crypto intrinsic (requires sha3).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Thin wrapper over the AArch64 SHA512SU1 crypto intrinsic (requires sha3).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    // Scalar form implemented via the 1-lane vector shift: transmute moves the
    // i64 scalars into int64x1_t lanes and the result back out.
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    // Scalar form implemented via the 1-lane vector shift: transmute moves the
    // scalars into 1-lane vectors and the result back out (shift amount stays signed).
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    // Shift amount for an 8-bit source lane is limited to 0..=8.
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half (lanes 8..=15) and widen-shift it by N.
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    // Shift amount for a 16-bit source lane is limited to 0..=16.
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Extract the high half (lanes 4..=7) and widen-shift it by N.
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    // Shift amount for a 32-bit source lane is limited to 0..=32.
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Extract the high half (lanes 2..=3) and widen-shift it by N.
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    // Doc summary corrected from "Signed" — this is the unsigned variant
    // (ushll2 instruction, unsigned types).
    // Shift amount for an 8-bit source lane is limited to 0..=8.
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half (lanes 8..=15) and widen-shift it by N.
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    // Doc summary corrected from "Signed" — this is the unsigned variant
    // (ushll2 instruction, unsigned types).
    // Shift amount for a 16-bit source lane is limited to 0..=16.
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Extract the high half (lanes 4..=7) and widen-shift it by N.
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    // Doc summary corrected from "Signed" — this is the unsigned variant
    // (ushll2 instruction, unsigned types).
    // Shift amount for a 32-bit source lane is limited to 0..=32.
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Extract the high half (lanes 2..=3) and widen-shift it by N.
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount must be 1..=8 for a 16-bit source narrowed to 8 bits.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by shifting right N, then concatenate the result above `a`.
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount must be 1..=16 for a 32-bit source narrowed to 16 bits.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` by shifting right N, then concatenate the result above `a`.
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount must be 1..=32 for a 64-bit source narrowed to 32 bits.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` by shifting right N, then concatenate the result above `a`.
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount must be 1..=8 for a 16-bit source narrowed to 8 bits.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by shifting right N, then concatenate the result above `a`.
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount must be 1..=16 for a 32-bit source narrowed to 16 bits.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` by shifting right N, then concatenate the result above `a`.
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount must be 1..=32 for a 64-bit source narrowed to 32 bits.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` by shifting right N, then concatenate the result above `a`.
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // N must fit in 3 unsigned bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Binding to LLVM's vsli intrinsic; the shift count is passed at runtime
    // but pinned to the const N.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // N must fit in 3 unsigned bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Binding to LLVM's vsli intrinsic for the 16-lane form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // N must fit in 4 unsigned bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Binding to LLVM's vsli intrinsic for the 4 x i16 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // N must fit in 4 unsigned bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Binding to LLVM's vsli intrinsic for the 8 x i16 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // N is restricted to 0..=31 for 32-bit lanes (same range as uimm_bits 5).
    static_assert!(N >= 0 && N <= 31);
    // Binding to LLVM's vsli intrinsic for the 2 x i32 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // N is restricted to 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // Binding to LLVM's vsli intrinsic for the 4 x i32 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // N is restricted to 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Binding to LLVM's vsli intrinsic for the 1 x i64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // N is restricted to 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Binding to LLVM's vsli intrinsic for the 2 x i64 form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // N must fit in 3 unsigned bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SLI is a pure bit operation, so the signed implementation is reused
    // via bit-pattern-preserving transmutes.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // N must fit in 3 unsigned bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Reuses the signed implementation via bit-pattern-preserving transmutes.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // N must fit in 4 unsigned bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Reuses the signed implementation via bit-pattern-preserving transmutes.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // N must fit in 4 unsigned bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Reuses the signed implementation via bit-pattern-preserving transmutes.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // N is restricted to 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // Reuses the signed implementation via bit-pattern-preserving transmutes.
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // N is restricted to 0..=31 for 32-bit lanes.
    static_assert!(N >= 0 && N <= 31);
    // Reuses the signed implementation via bit-pattern-preserving transmutes.
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // N is restricted to 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Reuses the signed implementation via bit-pattern-preserving transmutes.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // N is restricted to 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Reuses the signed implementation via bit-pattern-preserving transmutes.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // N must fit in 3 unsigned bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Polynomial variant reuses the signed implementation via transmutes.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // N must fit in 3 unsigned bits (0..=7) for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // Polynomial variant reuses the signed implementation via transmutes.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // N must fit in 4 unsigned bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Polynomial variant reuses the signed implementation via transmutes.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // N must fit in 4 unsigned bits (0..=15) for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // Polynomial variant reuses the signed implementation via transmutes.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline(always)]
// poly64 types require the aes feature in addition to neon.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    // N is restricted to 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Polynomial variant reuses the signed implementation via transmutes.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline(always)]
// poly64 types require the aes feature in addition to neon.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // N is restricted to 0..=63 for 64-bit lanes.
    static_assert!(N >= 0 && N <= 63);
    // Polynomial variant reuses the signed implementation via transmutes.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // N is restricted to 0..=63 for a 64-bit element.
    static_assert!(N >= 0 && N <= 63);
    // Scalar form via the 1-lane vector SLI; transmute converts between the
    // i64 scalar and int64x1_t.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Compile-time check: a 64-bit value admits left shifts of 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // Scalar form: route the u64 through the single-lane vector variant and
    // reinterpret the result back; no bits are changed by the transmutes.
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw binding to the LLVM intrinsic that lowers to the SM3PARTW1
    // instruction (see `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // Forward all operands directly to the LLVM intrinsic.
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw binding to the LLVM intrinsic that lowers to the SM3PARTW2
    // instruction (see `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // Forward all operands directly to the LLVM intrinsic.
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw binding to the LLVM intrinsic that lowers to the SM3SS1
    // instruction (see `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // Forward all operands directly to the LLVM intrinsic.
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Compile-time check: the lane index immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw binding to the LLVM intrinsic that lowers to the SM3TT1A
    // instruction; the immediate is passed as a trailing i64 argument.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Compile-time check: the lane index immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw binding to the LLVM intrinsic that lowers to the SM3TT1B
    // instruction; the immediate is passed as a trailing i64 argument.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Compile-time check: the lane index immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw binding to the LLVM intrinsic that lowers to the SM3TT2A
    // instruction; the immediate is passed as a trailing i64 argument.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Compile-time check: the lane index immediate must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw binding to the LLVM intrinsic that lowers to the SM3TT2B
    // instruction; the immediate is passed as a trailing i64 argument.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Raw binding to the LLVM intrinsic that lowers to the SM4EKEY
    // instruction (see `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // Forward both operands directly to the LLVM intrinsic.
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Raw binding to the LLVM intrinsic that lowers to the SM4E
    // instruction (see `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // Forward both operands directly to the LLVM intrinsic.
    unsafe { _vsm4eq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    // Raw binding to the LLVM `usqadd` intrinsic for 8 x u8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    // Raw binding to the LLVM `usqadd` intrinsic for 16 x u8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    // Raw binding to the LLVM `usqadd` intrinsic for 4 x u16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    // Raw binding to the LLVM `usqadd` intrinsic for 8 x u16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    unsafe { _vsqaddq_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    // Raw binding to the LLVM `usqadd` intrinsic for 2 x u32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i32"
        )]
        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
    }
    unsafe { _vsqadd_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    // Raw binding to the LLVM `usqadd` intrinsic for 4 x u32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i32"
        )]
        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
    }
    unsafe { _vsqaddq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    // Raw binding to the LLVM `usqadd` intrinsic for a single u64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v1i64"
        )]
        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
    }
    unsafe { _vsqadd_u64(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    // Raw binding to the LLVM `usqadd` intrinsic for 2 x u64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i64"
        )]
        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
    }
    unsafe { _vsqaddq_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    // Scalar form: splat both scalars into 8-lane vectors, run the vector
    // variant, then extract lane 0 as the result.
    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    // Scalar form: splat both scalars into 4-lane vectors, run the vector
    // variant, then extract lane 0 as the result.
    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    // Scalar form has a dedicated LLVM intrinsic (`.i64`), unlike the
    // u8/u16 variants which go through the vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    // Scalar form has a dedicated LLVM intrinsic (`.i32`), unlike the
    // u8/u16 variants which go through the vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vsqadds_u32(a, b) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    // Lane-wise square root via the generic SIMD intrinsic (lowers to FSQRT).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    // Lane-wise square root via the generic SIMD intrinsic (lowers to FSQRT).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise square root via the generic SIMD intrinsic (lowers to FSQRT).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise square root via the generic SIMD intrinsic (lowers to FSQRT).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise square root via the generic SIMD intrinsic (lowers to FSQRT).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise square root via the generic SIMD intrinsic (lowers to FSQRT).
    unsafe { simd_fsqrt(a) }
}
#[doc = "Floating-point square root of the value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn vsqrth_f16(a: f16) -> f16 {
    // NOTE(review): the previous summary said "round to integral"; the body
    // computes a square root (`sqrtf16`, lowering to FSQRT per assert_instr).
    sqrtf16(a)
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Compile-time check: SRI right shifts on 8-bit lanes admit 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Shared helper macro parameterized by lane type (u8), lane count (8)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u8, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Compile-time check: SRI right shifts on 8-bit lanes admit 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Shared helper macro parameterized by lane type (u8), lane count (16)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u8, 16, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Compile-time check: SRI right shifts on 16-bit lanes admit 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Shared helper macro parameterized by lane type (u16), lane count (4)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u16, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Compile-time check: SRI right shifts on 16-bit lanes admit 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Shared helper macro parameterized by lane type (u16), lane count (8)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u16, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Compile-time check: SRI right shifts on 32-bit lanes admit 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Shared helper macro parameterized by lane type (u32), lane count (2)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u32, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Compile-time check: SRI right shifts on 32-bit lanes admit 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Shared helper macro parameterized by lane type (u32), lane count (4)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u32, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Compile-time check: SRI right shifts on 64-bit lanes admit 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Shared helper macro parameterized by lane type (u64), lane count (1)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u64, 1, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Compile-time check: SRI right shifts on 64-bit lanes admit 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Shared helper macro parameterized by lane type (u64), lane count (2)
    // and shift amount.
    unsafe { super::shift_right_and_insert!(u64, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Compile-time check: SRI right shifts on 8-bit lanes admit 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Compile-time check: SRI right shifts on 8-bit lanes admit 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // Compile-time check: SRI right shifts on 16-bit lanes admit 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Compile-time check: SRI right shifts on 16-bit lanes admit 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Compile-time check: SRI right shifts on 32-bit lanes admit 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Compile-time check: SRI right shifts on 32-bit lanes admit 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Compile-time check: SRI right shifts on 64-bit lanes admit 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Compile-time check: SRI right shifts on 64-bit lanes admit 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type, the bit patterns are unchanged.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Compile-time check: SRI right shifts on 8-bit lanes admit 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type (poly8 <-> i8), the bit patterns are unchanged.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Compile-time check: SRI right shifts on 8-bit lanes admit 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type (poly8 <-> i8), the bit patterns are unchanged.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // Compile-time check: SRI right shifts on 16-bit lanes admit 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type (poly16 <-> i16), the bit patterns are unchanged.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // Compile-time check: SRI right shifts on 16-bit lanes admit 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type (poly16 <-> i16), the bit patterns are unchanged.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    // Compile-time check: SRI right shifts on 64-bit lanes admit 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Delegate to the signed variant; the transmutes only reinterpret the
    // lane type (poly64 <-> i64), the bit patterns are unchanged.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
#[inline(always)]
// poly64 intrinsics additionally require the `aes` target feature.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Shift amount for a 64-bit element must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: poly64 and int64 vectors share the same size/layout; SRI is a
    // pure bit operation, so the signed variant produces identical bits.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
// Codegen is checked against `bfxil` (bitfield insert) rather than `sri`;
// the check is skipped under the MSVC environment.
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Shift amount for a 64-bit scalar must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: an i64 scalar and an int64x1_t vector have identical layout,
    // so the scalar form is implemented via the one-lane vector intrinsic.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
// Codegen is checked against `bfxil` (bitfield insert) rather than `sri`;
// the check is skipped under the MSVC environment.
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Shift amount for a 64-bit scalar must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: a u64 scalar and a uint64x1_t vector have identical layout,
    // so the scalar form is implemented via the one-lane vector intrinsic.
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// poly64 intrinsics additionally require the `aes` target feature.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// poly64 intrinsics additionally require the `aes` target feature.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    // Unaligned store of the whole vector; `.cast()` reinterprets the
    // element pointer as a vector pointer (no alignment requirement).
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    // Unpack the 2-register tuple and forward to the LLVM st1x2 intrinsic.
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    // Unpack the 2-register tuple and forward to the LLVM st1x2 intrinsic.
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    // Unpack the 3-register tuple and forward to the LLVM st1x3 intrinsic.
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    // Unpack the 3-register tuple and forward to the LLVM st1x3 intrinsic.
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    // Unpack the 4-register tuple and forward to the LLVM st1x4 intrinsic.
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    // Unpack the 4-register tuple and forward to the LLVM st1x4 intrinsic.
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // A one-element vector has only lane 0.
    static_assert!(LANE == 0);
    // Extract the selected lane and store it as a plain scalar write.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // LANE must fit in 1 bit: a two-element vector has lanes 0 and 1.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and store it as a plain scalar write.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Note: codegen for the one-lane-per-register form is checked against `st1`.
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    // Unpack the 2-register tuple; the intrinsic takes an untyped byte pointer.
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // One-element vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    // The lane index is passed at runtime as i64; pointer is untyped bytes.
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // One-element vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    // The lane index is passed at runtime as i64; pointer is untyped bytes.
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// poly64 intrinsics additionally require the `aes` target feature.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    // One-element vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // poly64 and int64 share layout, so delegate to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    // One-element vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    // uint64 and int64 share layout, so delegate to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    // Unpack the 2-register tuple; the intrinsic takes an untyped byte pointer.
    _vst2q_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    // Unpack the 2-register tuple; the intrinsic takes an untyped byte pointer.
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // LANE must fit in 1 bit: two-element vectors have lanes 0 and 1.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the LLVM backend intrinsic this wrapper binds to.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    // The lane index is passed at runtime as i64; pointer is untyped bytes.
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15 for a
    // 16-element vector.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // poly64 shares its bit layout with i64, so reuse the signed variant
    // via transmute of both the pointer and the register pair.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // u8 shares its bit layout with i8, so reuse the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // u64 shares its bit layout with i64, so reuse the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // poly8 shares its bit layout with i8, so reuse the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // poly64 shares its bit layout with i64, so reuse the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // u64 shares its bit layout with i64, so reuse the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v1f64.p0"
        )]
        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
    }
    // Destination is passed as an untyped byte pointer per the intrinsic's ABI.
    _vst3_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // poly64 shares its bit layout with i64, so reuse the signed variant.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // u64 shares its bit layout with i64, so reuse the signed variant.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2f64.p0"
        )]
        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
    }
    // Destination is passed as an untyped byte pointer per the intrinsic's ABI.
    _vst3q_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2i64.p0"
        )]
        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
    }
    // Destination is passed as an untyped byte pointer per the intrinsic's ABI.
    _vst3q_s64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // poly64 shares its bit layout with i64, so reuse the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // u8 shares its bit layout with i8, so reuse the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // u64 shares its bit layout with i64, so reuse the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // poly8 shares its bit layout with i8, so reuse the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // poly64 shares its bit layout with i64, so reuse the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // u64 shares its bit layout with i64, so reuse the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    // Destination is passed as an untyped byte pointer per the intrinsic's ABI.
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // poly64 shares its bit layout with i64, so reuse the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    // Only lane 0 exists in a 1-element vector.
    static_assert!(LANE == 0);
    // u64 shares its bit layout with i64, so reuse the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
        )]
        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    // Destination is passed as an untyped byte pointer per the intrinsic's ABI.
    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
        )]
        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    // Destination is passed as an untyped byte pointer per the intrinsic's ABI.
    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the backing LLVM intrinsic (AArch64/Arm64EC only).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // The lane index is widened to i64 as required by the intrinsic signature.
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // poly64 shares its bit layout with i64, so reuse the signed variant.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // u8 shares its bit layout with i8, so reuse the signed variant.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    // LANE must fit in 1 unsigned bit: valid indices are 0..=1.
    static_assert_uimm_bits!(LANE, 1);
    // u64 shares its bit layout with i64, so reuse the signed variant.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    // LANE must fit in 4 unsigned bits: valid indices are 0..=15.
    static_assert_uimm_bits!(LANE, 4);
    // poly8 shares its bit layout with i8, so reuse the signed variant.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // poly64 shares its bit layout with i64, so reuse the signed variant.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // Bit-pattern identical store: reinterpret as signed 64-bit and delegate.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x1_t) {
    // A 1-element vector has only lane 0.
    static_assert!(LANE == 0);
    // Reinterpret the f64 lane as i64 bits and reuse the s64 release store.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x2_t) {
    // A 2-element vector: LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Reinterpret the f64 lanes as i64 bits and reuse the s64 release store.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x1_t) {
    // A 1-element vector has only lane 0.
    static_assert!(LANE == 0);
    // Unsigned and signed 64-bit share a bit pattern; delegate to the s64 path.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x2_t) {
    // A 2-element vector: LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Unsigned and signed 64-bit share a bit pattern; delegate to the s64 path.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x1_t) {
    // A 1-element vector has only lane 0.
    static_assert!(LANE == 0);
    // Polynomial and signed 64-bit share a bit pattern; delegate to the s64 path.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x2_t) {
    // A 2-element vector: LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // Polynomial and signed 64-bit share a bit pattern; delegate to the s64 path.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x1_t) {
    // A 1-element vector has only lane 0.
    static_assert!(LANE == 0);
    // View the destination as an atomic so the store gets release ordering
    // (the "Store-Release" semantics of STL1).
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    unsafe {
        // Pull the selected lane out of the vector, then store it atomically
        // with Release ordering.
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x2_t) {
    // A 2-element vector: LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    // View the destination as an atomic so the store gets release ordering
    // (the "Store-Release" semantics of STL1).
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    unsafe {
        // Pull the selected lane out of the vector, then store it atomically
        // with Release ordering.
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point subtraction a - b.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point subtraction a - b.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    // Scalar variant: wrapping (two's-complement) subtraction, matching the
    // non-trapping behaviour of the SUB instruction.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Scalar variant: wrapping (modular) subtraction, matching the
    // non-trapping behaviour of the SUB instruction.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision subtraction; plain `-` lowers to FSUB.
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Take the high 8 lanes of each input, widen them to 16 bits,
        // then subtract lane-wise (SSUBL2 semantics).
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Take the high 4 lanes of each input, widen them to 32 bits,
        // then subtract lane-wise (SSUBL2 semantics).
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Take the high 2 lanes of each input, widen them to 64 bits,
        // then subtract lane-wise (SSUBL2 semantics).
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Take the high 8 lanes of each input, zero-extend them to 16 bits,
        // then subtract lane-wise (USUBL2 semantics).
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Take the high 4 lanes of each input, zero-extend them to 32 bits,
        // then subtract lane-wise (USUBL2 semantics).
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Take the high 2 lanes of each input, zero-extend them to 64 bits,
        // then subtract lane-wise (USUBL2 semantics).
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Widen the high 8 lanes of b to 16 bits and subtract from a
        // (SSUBW2 semantics).
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Widen the high 4 lanes of b to 32 bits and subtract from a
        // (SSUBW2 semantics).
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Widen the high 2 lanes of b to 64 bits and subtract from a
        // (SSUBW2 semantics).
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Zero-extend the high 8 lanes of b to 16 bits and subtract from a
        // (USUBW2 semantics).
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Zero-extend the high 4 lanes of b to 32 bits and subtract from a
        // (USUBW2 semantics).
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Zero-extend the high 2 lanes of b to 64 bits and subtract from a
        // (USUBW2 semantics).
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // AArch64 has no 64-bit TBL table, so pad the 8-byte table to 16 bytes
    // with zeros and use the quad-register look-up. Out-of-range indices
    // then fall into the zeroed half.
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        {
            // Indices are reinterpreted as unsigned for the look-up.
            transmute(b)
        }
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Pad the 8-byte table to 16 bytes with zeros and use the
    // quad-register look-up.
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    // Pad the 8-byte table to 16 bytes with zeros and use the
    // quad-register look-up.
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // Concatenate the two 8-byte tables into one 16-byte table and perform
    // a single quad-register TBL.
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Concatenate the two 8-byte tables into one 16-byte table and perform
    // a single quad-register TBL.
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Concatenate the two 8-byte tables into one 16-byte table and perform
    // a single quad-register TBL.
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // Pack the three 8-byte tables into two 16-byte tables, zero-padding
    // the last half so out-of-range indices read zeros.
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the three 8-byte tables into two 16-byte tables, zero-padding
    // the last half so out-of-range indices read zeros.
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Pack the three 8-byte tables into two 16-byte tables, zero-padding
    // the last half so out-of-range indices read zeros.
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // Pack the four 8-byte tables into two full 16-byte tables and perform
    // a two-register quad TBL.
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the four 8-byte tables into two full 16-byte tables and perform
    // a two-register quad TBL.
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Pack the four 8-byte tables into two full 16-byte tables and perform
    // a two-register quad TBL.
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        // The single 8-byte table is zero-padded to 16 bytes for the quad
        // TBX, so indices 8..15 would wrongly read the padding instead of
        // preserving `a`. simd_select corrects that: lanes with index >= 8
        // keep the fallback value from `a`.
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        // The single 8-byte table is zero-padded to 16 bytes for the quad
        // TBX; simd_select keeps the fallback lane from `a` for any index
        // >= 8 that would otherwise hit the padding.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        // The single 8-byte table is zero-padded to 16 bytes for the quad
        // TBX; simd_select keeps the fallback lane from `a` for any index
        // >= 8 that would otherwise hit the padding.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // Two 8-byte tables exactly fill one 16-byte table, so the quad TBX
    // already gives the correct "keep `a` on out-of-range index" behaviour.
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Two 8-byte tables exactly fill one 16-byte table, so the quad TBX
    // already gives the correct "keep `a` on out-of-range index" behaviour.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Two 8-byte tables exactly fill one 16-byte table, so the quad TBX
    // already gives the correct "keep `a` on out-of-range index" behaviour.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // Pack the three 8-byte tables into two 16-byte tables, zero-padding
    // the last half.
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // Indices 24..31 would hit the zero padding instead of preserving
        // `a`, so simd_select keeps `a`'s lane wherever index >= 24.
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Pack the three 8-byte tables into two 16-byte tables, zero-padding
    // the last half.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // Indices 24..31 would hit the zero padding instead of preserving
        // `a`, so simd_select keeps `a`'s lane wherever index >= 24.
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Pack the three 8-byte tables into two 16-byte tables, zero-padding
    // the last half.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        // Indices 24..31 would hit the zero padding instead of preserving
        // `a`, so simd_select keeps `a`'s lane wherever index >= 24.
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // Four 8-byte tables exactly fill two 16-byte tables, so the
    // two-register quad TBX already preserves `a` for out-of-range indices.
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Four 8-byte tables exactly fill two 16-byte tables, so the
    // two-register quad TBX already preserves `a` for out-of-range indices.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Four 8-byte tables exactly fill two 16-byte tables, so the
    // two-register quad TBX already preserves `a` for out-of-range indices.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // TRN1: interleave the even-numbered lanes of a and b.
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // TRN1: interleave the even-numbered lanes of a and b.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// With only two lanes, TRN1 coincides with ZIP1; codegen emits zip1.
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Take lane 0 of a and lane 0 of b.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes, TRN1 (lane 0 of each input) is identical to
    // ZIP1 — hence the zip1 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes, TRN1 (lane 0 of each input) is identical to
    // ZIP1 — hence the zip1 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes, TRN1 (lane 0 of each input) is identical to
    // ZIP1 — hence the zip1 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes, TRN1 (lane 0 of each input) is identical to
    // ZIP1 — hence the zip1 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes, TRN1 (lane 0 of each input) is identical to
    // ZIP1 — hence the zip1 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes, TRN1 (lane 0 of each input) is identical to
    // ZIP1 — hence the zip1 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN1: interleave the even-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes, TRN2 (lane 1 of each input) is identical to
    // ZIP2 — hence the zip2 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes, TRN2 (lane 1 of each input) is identical to
    // ZIP2 — hence the zip2 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes, TRN2 (lane 1 of each input) is identical to
    // ZIP2 — hence the zip2 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes, TRN2 (lane 1 of each input) is identical to
    // ZIP2 — hence the zip2 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes, TRN2 (lane 1 of each input) is identical to
    // ZIP2 — hence the zip2 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes, TRN2 (lane 1 of each input) is identical to
    // ZIP2 — hence the zip2 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes, TRN2 (lane 1 of each input) is identical to
    // ZIP2 — hence the zip2 instruction assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN2: interleave the odd-numbered lanes of `a` and `b`
    // (shuffle indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe {
        // AND the operands, then compare each lane against zero: the result
        // lane is all-ones if `a` and `b` share any set bit, all-zeros otherwise.
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe {
        // AND the operands, then compare each lane against zero: the result
        // lane is all-ones if `a` and `b` share any set bit, all-zeros otherwise.
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe {
        // AND the operands, then compare each lane against zero: the result
        // lane is all-ones if `a` and `b` share any set bit, all-zeros otherwise.
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe {
        // AND the operands, then compare each lane against zero: the result
        // lane is all-ones if `a` and `b` share any set bit, all-zeros otherwise.
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe {
        // AND the operands, then compare each lane against zero: the result
        // lane is all-ones if `a` and `b` share any set bit, all-zeros otherwise.
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe {
        // AND the operands, then compare each lane against zero: the result
        // lane is all-ones if `a` and `b` share any set bit, all-zeros otherwise.
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: view each i64 as a 1-lane vector, reuse vtst_s64, and
    // reinterpret the single-lane all-ones/all-zeros mask back as a u64.
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    // Scalar form: view each u64 as a 1-lane vector, reuse vtst_u64, and
    // reinterpret the single-lane all-ones/all-zeros mask back as a u64.
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    // Thin wrapper over the AArch64 `suqadd` LLVM intrinsic; saturation
    // semantics are handled entirely by the hardware instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Thin wrapper over the AArch64 `suqadd` LLVM intrinsic; saturation
    // semantics are handled entirely by the hardware instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    // Thin wrapper over the AArch64 `suqadd` LLVM intrinsic; saturation
    // semantics are handled entirely by the hardware instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    // Thin wrapper over the AArch64 `suqadd` LLVM intrinsic; saturation
    // semantics are handled entirely by the hardware instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    // Thin wrapper over the AArch64 `suqadd` LLVM intrinsic; saturation
    // semantics are handled entirely by the hardware instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    // Thin wrapper over the AArch64 `suqadd` LLVM intrinsic; saturation
    // semantics are handled entirely by the hardware instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    // Thin wrapper over the AArch64 `suqadd` LLVM intrinsic; saturation
    // semantics are handled entirely by the hardware instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    // 128-bit (2 x i64) variant: saturating add of unsigned `b` into signed
    // `a` per lane, lowered to SUQADD via the LLVM intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    // SAFETY: the `neon` target feature enabled on this function makes the
    // intrinsic call sound.
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    // No scalar 8-bit LLVM intrinsic exists: broadcast both scalars into
    // 8-lane vectors, run the vector SUQADD (`vuqadd_s8`), extract lane 0.
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    // No scalar 16-bit LLVM intrinsic exists: broadcast both scalars into
    // 4-lane vectors, run the vector SUQADD (`vuqadd_s16`), extract lane 0.
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    // Scalar i64 form has a dedicated LLVM intrinsic (`suqadd.i64`), unlike
    // the 8/16-bit scalar variants which go through vector lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    // SAFETY: the `neon` target feature enabled on this function makes the
    // intrinsic call sound.
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    // Scalar i32 form has a dedicated LLVM intrinsic (`suqadd.i32`), unlike
    // the 8/16-bit scalar variants which go through vector lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    // SAFETY: the `neon` target feature enabled on this function makes the
    // intrinsic call sound.
    unsafe { _vuqadds_s32(a, b) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`,
    // i.e. (a[0], a[2], b[0], b[2]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`
    // (a[0], a[2], a[4], a[6], then b[0], b[2], b[4], b[6]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Even lanes of `a:b` = (a[0], b[0]). With only two lanes UZP1 is
    // identical to ZIP1, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Even lanes of `a:b` = (a[0], b[0]). With only two lanes UZP1 is
    // identical to ZIP1, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Even lanes of `a:b` = (a[0], b[0]). With only two lanes UZP1 is
    // identical to ZIP1, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Even lanes of `a:b` = (a[0], b[0]). With only two lanes UZP1 is
    // identical to ZIP1, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Even lanes of `a:b` = (a[0], b[0]). With only two lanes UZP1 is
    // identical to ZIP1, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Even lanes of `a:b` = (a[0], b[0]). With only two lanes UZP1 is
    // identical to ZIP1, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Even lanes of `a:b` = (a[0], b[0]). With only two lanes UZP1 is
    // identical to ZIP1, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`,
    // i.e. (a[0], a[2], b[0], b[2]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP1: keep the even-indexed lanes of the 32-lane concatenation `a:b`
    // (indices 0..16 select from `a`, 16..32 from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`,
    // i.e. (a[0], a[2], b[0], b[2]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`,
    // i.e. (a[0], a[2], b[0], b[2]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP1: keep the even-indexed lanes of the 32-lane concatenation `a:b`
    // (indices 0..16 select from `a`, 16..32 from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`,
    // i.e. (a[0], a[2], b[0], b[2]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`,
    // i.e. (a[0], a[2], b[0], b[2]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP1: keep the even-indexed lanes of the 32-lane concatenation `a:b`
    // (indices 0..16 select from `a`, 16..32 from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`,
    // i.e. (a[0], a[2], b[0], b[2]).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP1: keep the even-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`,
    // i.e. (a[1], a[3], b[1], b[3]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`
    // (a[1], a[3], a[5], a[7], then b[1], b[3], b[5], b[7]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Odd lanes of `a:b` = (a[1], b[1]). With only two lanes UZP2 is
    // identical to ZIP2, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Odd lanes of `a:b` = (a[1], b[1]). With only two lanes UZP2 is
    // identical to ZIP2, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Odd lanes of `a:b` = (a[1], b[1]). With only two lanes UZP2 is
    // identical to ZIP2, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Odd lanes of `a:b` = (a[1], b[1]). With only two lanes UZP2 is
    // identical to ZIP2, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Odd lanes of `a:b` = (a[1], b[1]). With only two lanes UZP2 is
    // identical to ZIP2, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Odd lanes of `a:b` = (a[1], b[1]). With only two lanes UZP2 is
    // identical to ZIP2, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Odd lanes of `a:b` = (a[1], b[1]). With only two lanes UZP2 is
    // identical to ZIP2, which is the instruction the test asserts.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`,
    // i.e. (a[1], a[3], b[1], b[3]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP2: keep the odd-indexed lanes of the 32-lane concatenation `a:b`
    // (indices 0..16 select from `a`, 16..32 from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`,
    // i.e. (a[1], a[3], b[1], b[3]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`,
    // i.e. (a[1], a[3], b[1], b[3]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP2: keep the odd-indexed lanes of the 32-lane concatenation `a:b`
    // (indices 0..16 select from `a`, 16..32 from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`,
    // i.e. (a[1], a[3], b[1], b[3]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`,
    // i.e. (a[1], a[3], b[1], b[3]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP2: keep the odd-indexed lanes of the 32-lane concatenation `a:b`
    // (indices 0..16 select from `a`, 16..32 from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`,
    // i.e. (a[1], a[3], b[1], b[3]).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP2: keep the odd-indexed lanes of the concatenation `a:b`
    // (indices 0..8 select from `a`, 8..16 from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // XAR (SHA3 extension): per the Arm documentation, XORs `a` and `b` then
    // rotates each 64-bit lane right by the compile-time amount IMM6 (0..=63).
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    // SAFETY: IMM6 is range-checked above, and `neon,sha3` target features
    // guarantee the XAR instruction is available.
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP1: interleave the low halves of `a` and `b`:
    // (a[0], b[0], a[1], b[1]).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP1: interleave the low halves of `a` and `b`:
    // (a[0], b[0], a[1], b[1], a[2], b[2], a[3], b[3]).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP1: interleave the low halves of `a` and `b` — for 2-lane vectors
    // that is simply (a[0], b[0]).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP1: interleave the low halves of `a` and `b`:
    // (a[0], b[0], a[1], b[1]).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP1: interleave the low halves of `a` and `b` — for 2-lane vectors
    // that is simply (a[0], b[0]).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP1: interleave the low halves of `a` and `b`:
    // (a[0], b[0], a[1], b[1], a[2], b[2], a[3], b[3]).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP1: interleave the low 8 lanes of `a` with the low 8 lanes of `b`
    // (indices 0..16 select from `a`, 16..32 from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP1: interleave the low halves of `a` and `b`:
    // (a[0], b[0], a[1], b[1]).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP1: interleave lanes 0..4 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP1: [0, 2] = [a[0], b[0]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP1: interleave the low halves; [0, 4, 1, 5] = [a[0], b[0], a[1], b[1]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP1: [0, 2] = [a[0], b[0]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP1: interleave lanes 0..4 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP1: interleave lanes 0..8 of `a` and `b` (indices >= 16 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP1: interleave the low halves; [0, 4, 1, 5] = [a[0], b[0], a[1], b[1]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP1: interleave lanes 0..4 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP1: [0, 2] = [a[0], b[0]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP1: interleave the low halves; [0, 4, 1, 5] = [a[0], b[0], a[1], b[1]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP1: [0, 2] = [a[0], b[0]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP1: interleave lanes 0..4 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP1: interleave lanes 0..8 of `a` and `b` (indices >= 16 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP1: interleave the low halves; [0, 4, 1, 5] = [a[0], b[0], a[1], b[1]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP1: interleave lanes 0..4 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP1: [0, 2] = [a[0], b[0]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP2: interleave the high halves; [2, 6, 3, 7] = [a[2], b[2], a[3], b[3]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP2: interleave lanes 4..8 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP2: [1, 3] = [a[1], b[1]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP2: interleave the high halves; [2, 6, 3, 7] = [a[2], b[2], a[3], b[3]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP2: [1, 3] = [a[1], b[1]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP2: interleave lanes 4..8 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP2: interleave lanes 8..16 of `a` and `b` (indices >= 16 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP2: interleave the high halves; [2, 6, 3, 7] = [a[2], b[2], a[3], b[3]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP2: interleave lanes 4..8 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP2: [1, 3] = [a[1], b[1]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP2: interleave the high halves; [2, 6, 3, 7] = [a[2], b[2], a[3], b[3]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP2: [1, 3] = [a[1], b[1]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP2: interleave lanes 4..8 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP2: interleave lanes 8..16 of `a` and `b` (indices >= 16 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP2: interleave the high halves; [2, 6, 3, 7] = [a[2], b[2], a[3], b[3]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP2: interleave lanes 4..8 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP2: [1, 3] = [a[1], b[1]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP2: interleave the high halves; [2, 6, 3, 7] = [a[2], b[2], a[3], b[3]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP2: [1, 3] = [a[1], b[1]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP2: interleave lanes 4..8 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP2: interleave lanes 8..16 of `a` and `b` (indices >= 16 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP2: interleave the high halves; [2, 6, 3, 7] = [a[2], b[2], a[3], b[3]].
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP2: interleave lanes 4..8 of `a` and `b` (indices >= 8 select from
    // `b`). SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP2: [1, 3] = [a[1], b[1]] (indices >= 2 select from `b`).
    // SAFETY: constant, in-range lane indices.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}