11#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
14#ifndef __AVX512VLBWINTRIN_H
15#define __AVX512VLBWINTRIN_H
18#define __DEFAULT_FN_ATTRS128 \
19 __attribute__((__always_inline__, __nodebug__, \
20 __target__("avx512vl,avx512bw,no-evex512"), \
21 __min_vector_width__(128)))
22#define __DEFAULT_FN_ATTRS256 \
23 __attribute__((__always_inline__, __nodebug__, \
24 __target__("avx512vl,avx512bw,no-evex512"), \
25 __min_vector_width__(256)))
/* Compare packed signed (cmpb) / unsigned (ucmpb) 8-bit elements of two
   128-bit vectors using predicate p (_MM_CMPINT_*), yielding a 16-bit
   result mask.  The _mm_mask_ forms take a write-mask m as the builtin's
   final operand; the unmasked forms pass an all-ones mask.  (The
   original chunk was truncated: the final mask-argument line of each
   macro was missing — restored here.) */
#define _mm_cmp_epi8_mask(a, b, p) \
  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
                                          (__v16qi)(__m128i)(b), (int)(p), \
                                          (__mmask16)-1))

#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
                                          (__v16qi)(__m128i)(b), (int)(p), \
                                          (__mmask16)(m)))

#define _mm_cmp_epu8_mask(a, b, p) \
  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
                                           (__v16qi)(__m128i)(b), (int)(p), \
                                           (__mmask16)-1))

#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
                                           (__v16qi)(__m128i)(b), (int)(p), \
                                           (__mmask16)(m)))
/* 256-bit byte compares: 32 elements, so the result and write-mask are
   __mmask32.  Same predicate/masking scheme as the 128-bit forms.
   (Truncated final mask-argument lines restored.) */
#define _mm256_cmp_epi8_mask(a, b, p) \
  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
                                          (__v32qi)(__m256i)(b), (int)(p), \
                                          (__mmask32)-1))

#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
                                          (__v32qi)(__m256i)(b), (int)(p), \
                                          (__mmask32)(m)))

#define _mm256_cmp_epu8_mask(a, b, p) \
  ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
                                           (__v32qi)(__m256i)(b), (int)(p), \
                                           (__mmask32)-1))

#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
  ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
                                           (__v32qi)(__m256i)(b), (int)(p), \
                                           (__mmask32)(m)))
/* 128-bit word compares: 8 elements, so the result and write-mask are
   __mmask8.  Same predicate/masking scheme as the byte forms.
   (Truncated final mask-argument lines restored.) */
#define _mm_cmp_epi16_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
                                         (__v8hi)(__m128i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
                                         (__v8hi)(__m128i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm_cmp_epu16_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
                                          (__v8hi)(__m128i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
                                          (__v8hi)(__m128i)(b), (int)(p), \
                                          (__mmask8)(m)))
/* 256-bit word compares: 16 elements, so the result and write-mask are
   __mmask16.  (Truncated final mask-argument lines restored.) */
#define _mm256_cmp_epi16_mask(a, b, p) \
  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
                                          (__v16hi)(__m256i)(b), (int)(p), \
                                          (__mmask16)-1))

#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
                                          (__v16hi)(__m256i)(b), (int)(p), \
                                          (__mmask16)(m)))

#define _mm256_cmp_epu16_mask(a, b, p) \
  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
                                           (__v16hi)(__m256i)(b), (int)(p), \
                                           (__mmask16)-1))

#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
                                           (__v16hi)(__m256i)(b), (int)(p), \
                                           (__mmask16)(m)))
/* Named-comparison wrappers over _mm_cmp_epi8_mask, one per
   _MM_CMPINT_* predicate (signed bytes, 128-bit). */
#define _mm_cmpeq_epi8_mask(A, B) \
    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi8_mask(k, A, B) \
    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi8_mask(A, B) \
    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi8_mask(k, A, B) \
    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi8_mask(A, B) \
    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi8_mask(k, A, B) \
    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi8_mask(A, B) \
    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi8_mask(k, A, B) \
    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi8_mask(A, B) \
    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi8_mask(k, A, B) \
    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi8_mask(A, B) \
    _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi8_mask(k, A, B) \
    _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
/* Named-comparison wrappers over _mm256_cmp_epi8_mask (signed bytes,
   256-bit). */
#define _mm256_cmpeq_epi8_mask(A, B) \
    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi8_mask(k, A, B) \
    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi8_mask(A, B) \
    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi8_mask(k, A, B) \
    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi8_mask(A, B) \
    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi8_mask(k, A, B) \
    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi8_mask(A, B) \
    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi8_mask(k, A, B) \
    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi8_mask(A, B) \
    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi8_mask(k, A, B) \
    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi8_mask(A, B) \
    _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi8_mask(k, A, B) \
    _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
/* Named-comparison wrappers over _mm_cmp_epu8_mask (unsigned bytes,
   128-bit). */
#define _mm_cmpeq_epu8_mask(A, B) \
    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu8_mask(k, A, B) \
    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu8_mask(A, B) \
    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu8_mask(k, A, B) \
    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu8_mask(A, B) \
    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu8_mask(k, A, B) \
    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu8_mask(A, B) \
    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu8_mask(k, A, B) \
    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu8_mask(A, B) \
    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu8_mask(k, A, B) \
    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu8_mask(A, B) \
    _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu8_mask(k, A, B) \
    _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
/* Named-comparison wrappers over _mm256_cmp_epu8_mask (unsigned bytes,
   256-bit). */
#define _mm256_cmpeq_epu8_mask(A, B) \
    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu8_mask(k, A, B) \
    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu8_mask(A, B) \
    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu8_mask(k, A, B) \
    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu8_mask(A, B) \
    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu8_mask(k, A, B) \
    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu8_mask(A, B) \
    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu8_mask(k, A, B) \
    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu8_mask(A, B) \
    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu8_mask(k, A, B) \
    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu8_mask(A, B) \
    _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu8_mask(k, A, B) \
    _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
/* Named-comparison wrappers over _mm_cmp_epi16_mask (signed words,
   128-bit). */
#define _mm_cmpeq_epi16_mask(A, B) \
    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi16_mask(k, A, B) \
    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi16_mask(A, B) \
    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi16_mask(k, A, B) \
    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi16_mask(A, B) \
    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi16_mask(k, A, B) \
    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi16_mask(A, B) \
    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi16_mask(k, A, B) \
    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi16_mask(A, B) \
    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi16_mask(k, A, B) \
    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi16_mask(A, B) \
    _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi16_mask(k, A, B) \
    _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
/* Named-comparison wrappers over _mm256_cmp_epi16_mask (signed words,
   256-bit). */
#define _mm256_cmpeq_epi16_mask(A, B) \
    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi16_mask(k, A, B) \
    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi16_mask(A, B) \
    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi16_mask(k, A, B) \
    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi16_mask(A, B) \
    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi16_mask(k, A, B) \
    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi16_mask(A, B) \
    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi16_mask(k, A, B) \
    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi16_mask(A, B) \
    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi16_mask(k, A, B) \
    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi16_mask(A, B) \
    _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi16_mask(k, A, B) \
    _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
/* Named-comparison wrappers over _mm_cmp_epu16_mask (unsigned words,
   128-bit). */
#define _mm_cmpeq_epu16_mask(A, B) \
    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu16_mask(k, A, B) \
    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu16_mask(A, B) \
    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu16_mask(k, A, B) \
    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu16_mask(A, B) \
    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu16_mask(k, A, B) \
    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu16_mask(A, B) \
    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu16_mask(k, A, B) \
    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu16_mask(A, B) \
    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu16_mask(k, A, B) \
    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu16_mask(A, B) \
    _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu16_mask(k, A, B) \
    _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
/* Named-comparison wrappers over _mm256_cmp_epu16_mask (unsigned words,
   256-bit). */
#define _mm256_cmpeq_epu16_mask(A, B) \
    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu16_mask(k, A, B) \
    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu16_mask(A, B) \
    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu16_mask(k, A, B) \
    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu16_mask(A, B) \
    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu16_mask(k, A, B) \
    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu16_mask(A, B) \
    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu16_mask(k, A, B) \
    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu16_mask(A, B) \
    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu16_mask(k, A, B) \
    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu16_mask(A, B) \
    _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu16_mask(k, A, B) \
    _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
311 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
318 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
325 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
332 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
339 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
346 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
353 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
360 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
367 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
374 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
381 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
388 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
395 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
402 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
409 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
416 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
423 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
430 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
437 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
444 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
452 return (__m128i) __builtin_ia32_selectb_128 ((
__mmask16) __U,
460 return (__m256i) __builtin_ia32_selectb_256 ((
__mmask32) __U,
468 return (__m128i) __builtin_ia32_selectw_128 ((
__mmask8) __U,
476 return (__m256i) __builtin_ia32_selectw_256 ((
__mmask16) __U,
484 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
492 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
500 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
508 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
516 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
524 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
532 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
540 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
547 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
555 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
563 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
571 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
579 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
587 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
595 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
603 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
611 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
619 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
627 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
635 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
643 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
651 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
659 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
667 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
675 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
683 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
691 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
699 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
707 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
715 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
723 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
731 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
739 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
747 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
755 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
763 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
771 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
779 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
787 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
795 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
803 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
811 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
819 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
827 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
835 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
843 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
851 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
859 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
867 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
875 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
883 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
891 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
899 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
907 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
915 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
923 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
931 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
939 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
947 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
955 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
963 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
971 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
979 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
987 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
995 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
1003 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
1011 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
1019 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
1027 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
1035 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
1043 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
1051 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
1059 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
1067 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
1075 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
1083 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__M,
1091 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
1099 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
1107 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
1115 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
1123 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1131 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1139 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1147 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1155 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1163 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1171 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1179 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1187 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1195 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1203 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1211 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1219 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1227 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1235 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1243 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1251 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1259 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1267 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1275 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1283 return (__m128i)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I,
1291 return (__m128i)__builtin_ia32_selectw_128(__U,
1300 return (__m128i)__builtin_ia32_selectw_128(__U,
1309 return (__m128i)__builtin_ia32_selectw_128(__U,
1317 return (__m256i)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I,
1325 return (__m256i)__builtin_ia32_selectw_256(__U,
1334 return (__m256i)__builtin_ia32_selectw_256(__U,
1343 return (__m256i)__builtin_ia32_selectw_256(__U,
1350 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1357 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1365 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1372 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1379 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
1386 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
1393 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
1400 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
1407 return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
1414 return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
1421 return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
1428 return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
1435 return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
1442 return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
1449 return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
1456 return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
1463 return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
1470 return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
1477 return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
1484 return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
1491 return (__m128i)__builtin_shufflevector(
1492 __builtin_convertvector((__v8hi)__A, __v8qi),
1493 (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
1499 return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
1506 return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
1514 __builtin_ia32_pmovwb128mem_mask ((__v16qi *)
__P, (__v8hi) __A, __M);
1521 __builtin_ia32_pmovswb128mem_mask ((__v16qi *)
__P, (__v8hi) __A, __M);
1527 __builtin_ia32_pmovuswb128mem_mask ((__v16qi *)
__P, (__v8hi) __A, __M);
1532 return (__m128i)__builtin_convertvector((__v16hi) __A, __v16qi);
1537 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
1544 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__M,
1552 __builtin_ia32_pmovwb256mem_mask ((__v16qi *)
__P, (__v16hi) __A, __M);
1558 __builtin_ia32_pmovswb256mem_mask ((__v16qi *)
__P, (__v16hi) __A, __M);
1564 __builtin_ia32_pmovuswb256mem_mask ((__v16qi*)
__P, (__v16hi) __A, __M);
1569 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1576 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1583 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1590 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1597 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1604 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1611 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1618 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1625 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1632 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1639 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1646 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1653 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1660 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1667 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1674 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1681 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1688 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1695 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1702 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1709 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1716 return (__m128i)__builtin_ia32_selectb_128((
__mmask16)__U,
1723 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1730 return (__m256i)__builtin_ia32_selectb_256((
__mmask32)__U,
1737 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1744 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1751 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1758 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1766 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1774 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1782 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1790 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1799 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1807 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1815 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1823 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
/* Masked/zero-masked shufflehi: apply the unmasked shuffle, then use the
   per-element select builtin to merge with W (mask form) or zeros
   (maskz form) under mask U. */
#define _mm_mask_shufflehi_epi16(W, U, A, imm) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
                                       (__v8hi)(__m128i)(W)))

#define _mm_maskz_shufflehi_epi16(U, A, imm) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
                                       (__v8hi)_mm_setzero_si128()))

#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                       (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
                                       (__v16hi)(__m256i)(W)))

#define _mm256_maskz_shufflehi_epi16(U, A, imm) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                       (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
                                       (__v16hi)_mm256_setzero_si256()))
/* Masked/zero-masked 128-bit shufflelo: unmasked shuffle merged with W
   (mask form) or zeros (maskz form) under mask U. */
#define _mm_mask_shufflelo_epi16(W, U, A, imm) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
                                       (__v8hi)(__m128i)(W)))

#define _mm_maskz_shufflelo_epi16(U, A, imm) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
                                       (__v8hi)_mm_setzero_si128()))
/* Masked/zero-masked 256-bit shufflelo.  (The original chunk dropped the
   `(imm)),` continuation line of each macro, leaving the shuffle call
   with only one argument — restored here to match the 128-bit forms.) */
#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                       (__v16hi)_mm256_shufflelo_epi16((A), (imm)), \
                                       (__v16hi)(__m256i)(W)))

#define _mm256_maskz_shufflelo_epi16(U, A, imm) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                       (__v16hi)_mm256_shufflelo_epi16((A), (imm)), \
                                       (__v16hi)_mm256_setzero_si256()))
1874 return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B);
1880 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1888 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1896 return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B);
1902 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1910 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1918 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1926 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1934 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1942 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1950 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1958 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
1967 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1975 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1983 return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B);
1989 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
1997 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2005 return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B);
2011 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2019 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2027 return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B);
2033 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2041 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2049 return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B);
2055 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2063 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2071 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2079 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2087 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2095 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2103 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2111 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2120 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2128 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2136 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2144 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2152 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2160 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2168 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2176 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__U,
2184 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2192 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__U,
2200 return (__m128i) __builtin_ia32_selectw_128 ((
__mmask8) __U,
2208 return (__m128i) __builtin_ia32_selectw_128 ((
__mmask8) __U,
2216 return (__m256i) __builtin_ia32_selectw_256 ((
__mmask16) __U,
2224 return (__m256i) __builtin_ia32_selectw_256 ((
__mmask16) __U,
2232 return (__m128i) __builtin_ia32_selectb_128 ((
__mmask16) __U,
2240 return (__m128i) __builtin_ia32_selectb_128 ((
__mmask16) __U,
2248 return (__m256i) __builtin_ia32_selectb_256 ((
__mmask32) __U,
2256 return (__m256i) __builtin_ia32_selectb_256 ((
__mmask32) __U,
2265 return (__m128i) __builtin_ia32_selectb_128(__M,
2273 return (__m128i) __builtin_ia32_selectb_128(__M,
2281 return (__m256i) __builtin_ia32_selectb_256(__M,
2289 return (__m256i) __builtin_ia32_selectb_256(__M,
2297 struct __loadu_epi16 {
2300 return ((
const struct __loadu_epi16*)
__P)->__v;
2306 return (__m128i) __builtin_ia32_loaddquhi128_mask ((
const __v8hi *)
__P,
2314 return (__m128i) __builtin_ia32_loaddquhi128_mask ((
const __v8hi *)
__P,
2323 struct __loadu_epi16 {
2326 return ((
const struct __loadu_epi16*)
__P)->__v;
2332 return (__m256i) __builtin_ia32_loaddquhi256_mask ((
const __v16hi *)
__P,
2340 return (__m256i) __builtin_ia32_loaddquhi256_mask ((
const __v16hi *)
__P,
2349 struct __loadu_epi8 {
2352 return ((
const struct __loadu_epi8*)
__P)->__v;
2358 return (__m128i) __builtin_ia32_loaddquqi128_mask ((
const __v16qi *)
__P,
2366 return (__m128i) __builtin_ia32_loaddquqi128_mask ((
const __v16qi *)
__P,
2375 struct __loadu_epi8 {
2378 return ((
const struct __loadu_epi8*)
__P)->__v;
2384 return (__m256i) __builtin_ia32_loaddquqi256_mask ((
const __v32qi *)
__P,
2392 return (__m256i) __builtin_ia32_loaddquqi256_mask ((
const __v32qi *)
__P,
2401 struct __storeu_epi16 {
2404 ((
struct __storeu_epi16*)
__P)->__v = __A;
2410 __builtin_ia32_storedquhi128_mask ((__v8hi *)
__P,
2418 struct __storeu_epi16 {
2421 ((
struct __storeu_epi16*)
__P)->__v = __A;
2427 __builtin_ia32_storedquhi256_mask ((__v16hi *)
__P,
2435 struct __storeu_epi8 {
2438 ((
struct __storeu_epi8*)
__P)->__v = __A;
2444 __builtin_ia32_storedquqi128_mask ((__v16qi *)
__P,
2452 struct __storeu_epi8 {
2455 ((
struct __storeu_epi8*)
__P)->__v = __A;
2461 __builtin_ia32_storedquqi256_mask ((__v32qi *)
__P,
2576 return (
__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A);
2582 return (
__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A);
2588 return (
__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A);
2594 return (
__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A);
2600 return (__m128i) __builtin_ia32_cvtmask2b128 (__A);
2606 return (__m256i) __builtin_ia32_cvtmask2b256 (__A);
2612 return (__m128i) __builtin_ia32_cvtmask2w128 (__A);
2618 return (__m256i) __builtin_ia32_cvtmask2w256 (__A);
2624 return (__m128i)__builtin_ia32_selectb_128(__M,
2632 return (__m128i)__builtin_ia32_selectb_128(__M,
2640 return (__m256i)__builtin_ia32_selectb_256(__M,
2648 return (__m256i)__builtin_ia32_selectb_256(__M,
2656 return (__m128i)__builtin_ia32_selectw_128(__M,
2664 return (__m128i)__builtin_ia32_selectw_128(__M,
2672 return (__m256i)__builtin_ia32_selectw_256(__M,
2680 return (__m256i)__builtin_ia32_selectw_256(__M,
2688 return (__m256i) __builtin_ia32_selectw_256 (__M,
2696 return (__m256i) __builtin_ia32_selectw_256(__M,
2704 return (__m128i) __builtin_ia32_selectw_128(__M,
2712 return (__m128i) __builtin_ia32_selectw_128(__M,
2720 return (__m128i)__builtin_ia32_permvarhi128((__v8hi) __B, (__v8hi) __A);
2726 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
2735 return (__m128i)__builtin_ia32_selectw_128((
__mmask8)__M,
2743 return (__m256i)__builtin_ia32_permvarhi256((__v16hi) __B, (__v16hi) __A);
2750 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
2759 return (__m256i)__builtin_ia32_selectw_256((
__mmask16)__M,
2764#define _mm_mask_alignr_epi8(W, U, A, B, N) \
2765 ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
2766 (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
2767 (__v16qi)(__m128i)(W)))
2769#define _mm_maskz_alignr_epi8(U, A, B, N) \
2770 ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
2771 (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
2772 (__v16qi)_mm_setzero_si128()))
2774#define _mm256_mask_alignr_epi8(W, U, A, B, N) \
2775 ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
2776 (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
2777 (__v32qi)(__m256i)(W)))
2779#define _mm256_maskz_alignr_epi8(U, A, B, N) \
2780 ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
2781 (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
2782 (__v32qi)_mm256_setzero_si256()))
2784#define _mm_dbsad_epu8(A, B, imm) \
2785 ((__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
2786 (__v16qi)(__m128i)(B), (int)(imm)))
2788#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
2789 ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
2790 (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
2791 (__v8hi)(__m128i)(W)))
2793#define _mm_maskz_dbsad_epu8(U, A, B, imm) \
2794 ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
2795 (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
2796 (__v8hi)_mm_setzero_si128()))
2798#define _mm256_dbsad_epu8(A, B, imm) \
2799 ((__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
2800 (__v32qi)(__m256i)(B), (int)(imm)))
2802#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
2803 ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
2804 (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
2805 (__v16hi)(__m256i)(W)))
2807#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
2808 ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
2809 (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
2810 (__v16hi)_mm256_setzero_si256()))
2814 return __builtin_reduce_add((__v8hi)__W);
2819 return __builtin_reduce_mul((__v8hi)__W);
2824 return __builtin_reduce_and((__v8hi)__W);
2829 return __builtin_reduce_or((__v8hi)__W);
2835 return __builtin_reduce_add((__v8hi)__W);
2841 return __builtin_reduce_mul((__v8hi)__W);
2847 return __builtin_reduce_and((__v8hi)__W);
2853 return __builtin_reduce_or((__v8hi)__W);
2858 return __builtin_reduce_max((__v8hi)__V);
2863 return __builtin_reduce_max((__v8hu)__V);
2868 return __builtin_reduce_min((__v8hi)__V);
2873 return __builtin_reduce_min((__v8hu)__V);
2879 return __builtin_reduce_max((__v8hi)__V);
2885 return __builtin_reduce_max((__v8hu)__V);
2891 return __builtin_reduce_min((__v8hi)__V);
2897 return __builtin_reduce_min((__v8hu)__V);
2902 return __builtin_reduce_add((__v16hi)__W);
2907 return __builtin_reduce_mul((__v16hi)__W);
2912 return __builtin_reduce_and((__v16hi)__W);
2917 return __builtin_reduce_or((__v16hi)__W);
2923 return __builtin_reduce_add((__v16hi)__W);
2929 return __builtin_reduce_mul((__v16hi)__W);
2935 return __builtin_reduce_and((__v16hi)__W);
2941 return __builtin_reduce_or((__v16hi)__W);
2946 return __builtin_reduce_max((__v16hi)__V);
2951 return __builtin_reduce_max((__v16hu)__V);
2956 return __builtin_reduce_min((__v16hi)__V);
2961 return __builtin_reduce_min((__v16hu)__V);
2967 return __builtin_reduce_max((__v16hi)__V);
2973 return __builtin_reduce_max((__v16hu)__V);
2979 return __builtin_reduce_min((__v16hi)__V);
2985 return __builtin_reduce_min((__v16hu)__V);
2990 return __builtin_reduce_add((__v16qs)__W);
2995 return __builtin_reduce_mul((__v16qs)__W);
3000 return __builtin_reduce_and((__v16qs)__W);
3005 return __builtin_reduce_or((__v16qs)__W);
3011 return __builtin_reduce_add((__v16qs)__W);
3017 return __builtin_reduce_mul((__v16qs)__W);
3023 return __builtin_reduce_and((__v16qs)__W);
3029 return __builtin_reduce_or((__v16qs)__W);
3034 return __builtin_reduce_max((__v16qs)__V);
3039 return __builtin_reduce_max((__v16qu)__V);
3044 return __builtin_reduce_min((__v16qs)__V);
3049 return __builtin_reduce_min((__v16qu)__V);
3055 return __builtin_reduce_max((__v16qs)__V);
3061 return __builtin_reduce_max((__v16qu)__V);
3067 return __builtin_reduce_min((__v16qs)__V);
3073 return __builtin_reduce_min((__v16qu)__V);
3078 return __builtin_reduce_add((__v32qs)__W);
3083 return __builtin_reduce_mul((__v32qs)__W);
3088 return __builtin_reduce_and((__v32qs)__W);
3093 return __builtin_reduce_or((__v32qs)__W);
3099 return __builtin_reduce_add((__v32qs)__W);
3105 return __builtin_reduce_mul((__v32qs)__W);
3111 return __builtin_reduce_and((__v32qs)__W);
3117 return __builtin_reduce_or((__v32qs)__W);
3122 return __builtin_reduce_max((__v32qs)__V);
3127 return __builtin_reduce_max((__v32qu)__V);
3132 return __builtin_reduce_min((__v32qs)__V);
3137 return __builtin_reduce_min((__v32qu)__V);
3143 return __builtin_reduce_max((__v32qs)__V);
3149 return __builtin_reduce_max((__v32qu)__V);
3155 return __builtin_reduce_min((__v32qs)__V);
3161 return __builtin_reduce_min((__v32qu)__V);
3164#undef __DEFAULT_FN_ATTRS128
3165#undef __DEFAULT_FN_ATTRS256
_Float16 __2f16 __attribute__((ext_vector_type(2)))
Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_abs_epi8(__m256i __a)
Computes the absolute value of each signed byte in the 256-bit integer vector __a and returns each va...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srl_epi16(__m256i __a, __m128i __count)
Shifts each 16-bit element of the 256-bit vector of [16 x i16] in __a right by the number of bits giv...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_and_si256(__m256i __a, __m256i __b)
Computes the bitwise AND of the 256-bit integer vectors in __a and __b.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epu16(__m256i __a, __m256i __b)
Compares the corresponding unsigned 16-bit integers in the two 256-bit vectors of [16 x i16] in __a a...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu16(__m256i __a, __m256i __b)
Compares the corresponding unsigned 16-bit integers in the two 256-bit vectors of [16 x i16] in __a a...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_packs_epi32(__m256i __a, __m256i __b)
Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit integers using signed saturation,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi8_epi16(__m128i __V)
Sign-extends bytes from the 128-bit integer vector in __V and returns the 16-bit values in the corres...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_shuffle_epi8(__m256i __a, __m256i __b)
Shuffles 8-bit integers in the 256-bit integer vector __a according to control information in the 256...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_broadcastb_epi8(__m128i __X)
Broadcasts the low byte from the 128-bit integer vector in __X to all bytes of the 128-bit result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu8_epi16(__m128i __V)
Zero-extends bytes from the 128-bit integer vector in __V and returns the 16-bit values in the corres...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_subs_epu8(__m256i __a, __m256i __b)
Subtracts 8-bit integers from corresponding bytes of two 256-bit integer vectors using unsigned satur...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi8(__m256i __a, __m256i __b)
Compares the corresponding signed bytes in the two 256-bit integer vectors in __a and __b and returns...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sub_epi16(__m256i __a, __m256i __b)
Subtracts 16-bit integers from corresponding elements of two 256-bit vectors of [16 x i16].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epi8(__m256i __a, __m256i __b)
Compares the corresponding signed bytes in the two 256-bit integer vectors in __a and __b and returns...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maddubs_epi16(__m256i __a, __m256i __b)
Multiplies each unsigned byte from the 256-bit integer vector in __a with the corresponding signed by...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpackhi_epi8(__m256i __a, __m256i __b)
Unpacks and interleaves 8-bit integers from parts of the 256-bit integer vectors in __a and __b to fo...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_adds_epu16(__m256i __a, __m256i __b)
Adds 16-bit integers from corresponding elements of two 256-bit vectors of [16 x i16] using unsigned ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mulhrs_epi16(__m256i __a, __m256i __b)
Multiplies signed 16-bit integer elements of two 256-bit vectors of [16 x i16], truncates the 32-bit ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sll_epi16(__m256i __a, __m128i __count)
Shifts each 16-bit element of the 256-bit vector of [16 x i16] in __a left by the number of bits spec...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_add_epi8(__m256i __a, __m256i __b)
Adds 8-bit integers from corresponding bytes of two 256-bit integer vectors and returns the lower 8 b...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_add_epi16(__m256i __a, __m256i __b)
Adds 16-bit integers from corresponding elements of two 256-bit vectors of [16 x i16] and returns the...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_subs_epi8(__m256i __a, __m256i __b)
Subtracts 8-bit integers from corresponding bytes of two 256-bit integer vectors using signed saturat...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epu8(__m256i __a, __m256i __b)
Compares the corresponding unsigned bytes in the two 256-bit integer vectors in __a and __b and retur...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi16(__m256i __a, __m256i __b)
Compares the corresponding signed 16-bit integers in the two 256-bit vectors of [16 x i16] in __a and...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epi16(__m256i __a, __m256i __b)
Compares the corresponding signed 16-bit integers in the two 256-bit vectors of [16 x i16] in __a and...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcastb_epi8(__m128i __X)
Broadcasts the low byte from the 128-bit integer vector in __X to all bytes of the 256-bit result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpackhi_epi16(__m256i __a, __m256i __b)
Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors of [16 x i16] in __a and __...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mulhi_epi16(__m256i __a, __m256i __b)
Multiplies signed 16-bit integer elements of two 256-bit vectors of [16 x i16], and returns the upper...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_packs_epi16(__m256i __a, __m256i __b)
Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit integers using signed saturation,...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_broadcastw_epi16(__m128i __X)
Broadcasts the low element from the 128-bit vector of [8 x i16] in __X to all elements of the result'...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sub_epi8(__m256i __a, __m256i __b)
Subtracts 8-bit integers from corresponding bytes of two 256-bit integer vectors.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mulhi_epu16(__m256i __a, __m256i __b)
Multiplies unsigned 16-bit integer elements of two 256-bit vectors of [16 x i16], and returns the upp...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_slli_epi16(__m256i __a, int __count)
Shifts each 16-bit element of the 256-bit vector of [16 x i16] in __a left by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_madd_epi16(__m256i __a, __m256i __b)
Multiplies corresponding 16-bit elements of two 256-bit vectors of [16 x i16], forming 32-bit interme...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_avg_epu16(__m256i __a, __m256i __b)
Computes the averages of the corresponding unsigned 16-bit integers in the two 256-bit vectors of [16...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu8(__m256i __a, __m256i __b)
Compares the corresponding unsigned bytes in the two 256-bit integer vectors in __a and __b and retur...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_packus_epi32(__m256i __V1, __m256i __V2)
Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers using unsigned saturation,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpacklo_epi8(__m256i __a, __m256i __b)
Unpacks and interleaves 8-bit integers from parts of the 256-bit integer vectors in __a and __b to fo...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_subs_epi16(__m256i __a, __m256i __b)
Subtracts 16-bit integers from corresponding elements of two 256-bit vectors of [16 x i16] using sign...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcastw_epi16(__m128i __X)
Broadcasts the low element from the 128-bit vector of [8 x i16] in __X to all elements of the result'...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mullo_epi16(__m256i __a, __m256i __b)
Multiplies signed 16-bit integer elements of two 256-bit vectors of [16 x i16], and returns the lower...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srai_epi16(__m256i __a, int __count)
Shifts each 16-bit element of the 256-bit vector of [16 x i16] in __a right by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpacklo_epi16(__m256i __a, __m256i __b)
Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors of [16 x i16] in __a and __...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_adds_epu8(__m256i __a, __m256i __b)
Adds 8-bit integers from corresponding bytes of two 256-bit integer vectors using unsigned saturation...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_packus_epi16(__m256i __a, __m256i __b)
Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers using unsigned saturation,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_adds_epi8(__m256i __a, __m256i __b)
Adds 8-bit integers from corresponding bytes of two 256-bit integer vectors using signed saturation,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sra_epi16(__m256i __a, __m128i __count)
Shifts each 16-bit element of the 256-bit vector of [16 x i16] in __a right by the number of bits giv...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_abs_epi16(__m256i __a)
Computes the absolute value of each signed 16-bit element in the 256-bit vector of [16 x i16] in __a ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_adds_epi16(__m256i __a, __m256i __b)
Adds 16-bit integers from corresponding elements of two 256-bit vectors of [16 x i16] using signed sa...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_avg_epu8(__m256i __a, __m256i __b)
Computes the averages of the corresponding unsigned bytes in the two 256-bit integer vectors in __a a...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_subs_epu16(__m256i __a, __m256i __b)
Subtracts 16-bit integers from corresponding elements of two 256-bit vectors of [16 x i16] using unsi...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srli_epi16(__m256i __a, int __count)
Shifts each 16-bit element of the 256-bit vector of [16 x i16] in __a right by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi8(__mmask32 __U, __m256i __A, __m256i __W)
#define _mm256_mask_cmpeq_epi16_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi16_epi8(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi16(void *__P, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srlv_epi16(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastb_epi8(__m128i __O, __mmask16 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi16_epi8(__mmask8 __M, __m128i __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 _mm_movepi8_mask(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi8(__m128i __W, __mmask16 __U, void const *__P)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi8(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srlv_epi16(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastb_epi8(__m256i __O, __mmask32 __M, __m128i __A)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_add_epi16(__mmask16 __M, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_abs_epi8(__mmask16 __U, __m128i __A)
#define _mm256_cmpeq_epi8_mask(A, B)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_reduce_max_epi8(__m128i __V)
static __inline__ unsigned char __DEFAULT_FN_ATTRS256 _mm256_reduce_min_epu8(__m256i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_movepi16_mask(__m128i __A)
static __inline__ unsigned short __DEFAULT_FN_ATTRS256 _mm256_reduce_min_epu16(__m256i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A)
static __inline__ unsigned char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __V)
#define _mm_cmpneq_epi16_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_reduce_and_epi8(__m256i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B)
#define _mm_mask_cmpeq_epi8_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_and_epi16(__mmask16 __M, __m256i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_reduce_or_epi16(__m128i __W)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_reduce_and_epi16(__m256i __W)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_max_epi16(__mmask16 __M, __m128i __V)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 _mm_testn_epi8_mask(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastb_epi8(__mmask32 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B)
#define _mm_cmpneq_epi8_mask(A, B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi16_mask(__m128i __A, __m128i __B)
#define _mm256_mask_cmpneq_epi8_mask(k, A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi8(__m128i __W, __mmask16 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi16(__mmask16 __M, __m256i __A, __m256i __B)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_add_epi16(__mmask8 __M, __m128i __W)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_abs_epi16(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi16(void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi8(__mmask16 __M, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi16(void *__P, __mmask16 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi16_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi16(__mmask8 __U, __m128i __A, int __B)
#define _mm256_cmpneq_epi8_mask(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_reduce_min_epi8(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi16(__mmask8 __M, short __A)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi8(__mmask16 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu8(__mmask32 __M, __m256i __A, __m256i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_reduce_min_epi8(__m256i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sllv_epi16(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi16_mask(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi16_epi8(__mmask16 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi8(__m128i __O, __mmask16 __M, char __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_min_epi16(__mmask16 __M, __m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 _mm256_movepi8_mask(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B)
#define _mm_cmpeq_epi8_mask(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi16(void *__P, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sll_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ unsigned short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_max_epu16(__mmask16 __M, __m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi8(__mmask32 __M, char __A)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 _mm256_testn_epi8_mask(__m256i __A, __m256i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi16(__mmask16 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi8(__m256i __W, __mmask32 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ unsigned short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_reduce_mul_epi16(__m256i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi8(__mmask32 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ unsigned char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi16(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu8(__mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srav_epi16(__m128i __A, __m128i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_reduce_and_epi8(__m128i __W)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_reduce_or_epi16(__m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sllv_epi16(__m256i __A, __m256i __B)
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 _mm256_test_epi8_mask(__m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi16(__mmask16 __M, short __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_reduce_add_epi8(__m128i __W)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi16(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_movm_epi8(__mmask16 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi8(__mmask16 __U, __m128i __A, __m128i __W)
#define _mm256_cmpeq_epi16_mask(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi8(void *__P, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_reduce_add_epi16(__m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_mul_epi16(__mmask8 __M, __m128i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi16(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_abs_epi8(__mmask32 __U, __m256i __A)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi16_epi8(__m128i __A)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 _mm256_movepi16_mask(__m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi16(__m256i __W, __mmask16 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutexvar_epi16(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_reduce_mul_epi8(__m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastw_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srav_epi16(__m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi8(__mmask16 __M, char __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi8(__mmask32 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi16(__m256i __W, __mmask16 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 _mm_test_epi8_mask(__m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __V)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_reduce_or_epi8(__m128i __W)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi16(__mmask16 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
#define __DEFAULT_FN_ATTRS256
#define _mm_mask_cmpneq_epi16_mask(k, A, B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 _mm256_test_epi16_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi16_epi8(__mmask16 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_movm_epi8(__mmask32 __A)
#define _mm_cmpeq_epi16_mask(A, B)
#define _mm_mask_cmpeq_epi16_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
#define __DEFAULT_FN_ATTRS128
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi8(void *__P, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_movm_epi16(__mmask16 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi8(__mmask16 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutexvar_epi16(__m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi8(void *__P, __mmask16 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B)
static __inline__ unsigned char __DEFAULT_FN_ATTRS256 _mm256_reduce_max_epu8(__m256i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutexvar_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi16(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi16(__mmask8 __U, void const *__P)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V)
static __inline__ unsigned short __DEFAULT_FN_ATTRS128 _mm_reduce_min_epu16(__m128i __V)
static __inline__ unsigned short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __V)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_reduce_mul_epi16(__m128i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastw_epi16(__m256i __O, __mmask16 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 _mm256_testn_epi16_mask(__m256i __A, __m256i __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi16(__mmask16 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ unsigned short __DEFAULT_FN_ATTRS128 _mm_reduce_max_epu16(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_reduce_max_epi16(__m256i __V)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_reduce_max_epi8(__m256i __V)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi8(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_and_epi16(__mmask8 __M, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_reduce_add_epi8(__m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
#define _mm256_mask_cmpeq_epi8_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi16_epi8(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi16_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastw_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_reduce_max_epi16(__m128i __V)
static __inline__ unsigned char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastb_epi8(__mmask16 __M, __m128i __A)
#define _mm256_mask_cmpneq_epi16_mask(k, A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_mul_epi16(__mmask16 __M, __m256i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_reduce_min_epi16(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi16(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_reduce_or_epi8(__m256i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A)
static __inline__ unsigned char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi16(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_movm_epi16(__mmask8 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ unsigned short __DEFAULT_FN_ATTRS256 _mm256_reduce_max_epu16(__m256i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_reduce_add_epi16(__m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi16_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi16_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srl_epi16(__mmask8 __U, __m128i __A, __m128i __B)
#define _mm_mask_cmpneq_epi8_mask(k, A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastw_epi16(__mmask16 __M, __m128i __A)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_reduce_and_epi16(__m128i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
#define _mm256_cmpneq_epi16_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi16(__mmask16 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi8(__m256i __W, __mmask32 __U, __m256i __A)
static __inline__ short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W)
static __inline__ unsigned char __DEFAULT_FN_ATTRS128 _mm_reduce_max_epu8(__m128i __V)
static __inline__ signed char __DEFAULT_FN_ATTRS128 _mm_reduce_mul_epi8(__m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutexvar_epi16(__m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
static __inline__ unsigned char __DEFAULT_FN_ATTRS128 _mm_reduce_min_epu8(__m128i __V)
static __inline__ unsigned short __DEFAULT_FN_ATTRS128 _mm_mask_reduce_min_epu16(__mmask16 __M, __m128i __V)
static __inline__ signed char __DEFAULT_FN_ATTRS256 _mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi16(void const *__P)
static __inline__ short __DEFAULT_FN_ATTRS256 _mm256_reduce_min_epi16(__m256i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi8(char __b)
Constructs a 256-bit integer vector of [32 x i8], with each of the 8-bit integral vector elements set to the specified 8-bit integral value.
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi16(short __w)
Constructs a 256-bit integer vector of [16 x i16], with each of the 16-bit integral vector elements set to the specified 16-bit integral value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-3) values from each of the two 128-bit vectors of [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bit unsigned integers, and packs the results into the destination.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the smaller value from each comparison in the corresponding element of a 128-bit result vector of [16 x i8].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a, __m128i __b)
Subtracts the corresponding 16-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the lower 16 bits of ea...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a, int __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the greater value from each comparison in the corresponding element of a 128-bit result vector of [16 x i8].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, __m128i __count)
Right-shifts each of 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-7) values from two 128-bit vectors of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit signed integer values in the input and returns the di...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit unsigned integer values in the input and returns the...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a, __m128i __b)
Unpacks the high-order (index 4-7) values from two 128-bit vectors of [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two 128-bit signed [8 x i16] vectors, producing eight interm...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a, int __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit signed integer values in the input and returns the d...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a, __m128i __b)
Subtracts the corresponding 8-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the upper 16 bits of ea...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two unsigned [8 x i16] vectors, saving the upper 16 bits of ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the greater value fro...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, __m128i __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b)
Initializes all values in a 128-bit vector of [16 x i8] with the specified 8-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, __m128i __b)
Converts, with saturation, 32-bit signed integers from both 128-bit integer vector operands into 16-b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, __m128i __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [16 x i8], saving the lower 8 bits of each ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the smaller value fro...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w)
Initializes all values in a 128-bit vector of [8 x i16] with the specified 16-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a, __m128i __b)
Unpacks the high-order (index 8-15) values from two 128-bit vectors of [16 x i8] and interleaves them...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a, int __count)
Right-shifts each of the 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [8 x i16], saving the lower 16 bits of each...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit unsigned integer values in the input and returns the ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__P, __m128i __v)
Stores the low 16-bit element of a 128-bit integer vector to an unaligned memory location. [NOTE(review): original text garbled by extraction ("struct __storeu_i16 *__P __v"); signature reconstructed from the packed may_alias helper struct used by this intrinsic — verify against emmintrin.h.]
[NOTE(review): the following two entries were garbled beyond confident reconstruction; original residue preserved. "__inline unsigned int unsigned int unsigned int * __P" — a prototype taking two unsigned int values and an unsigned int *__P out-parameter; "__inline unsigned int unsigned int __Y" — a prototype returning unsigned int with an unsigned int __Y parameter. Verify against the generated-from header.]
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [8 x u16] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V)
Zero-extends each of the lower eight 8-bit integer elements of a 128-bit vector of [16 x i8] to 16-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [16 x i8] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1, __m128i __V2)
Converts, with saturation, 32-bit signed integers from both 128-bit integer vector operands into 16-b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [16 x i8] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [8 x u16] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V)
Sign-extends each of the lower eight 8-bit integer elements of a 128-bit vector of [16 x i8] to 16-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_abs_epi8(__m128i __a)
Computes the absolute value of each of the packed 8-bit signed integers in the source operand and sto...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a, __m128i __b)
Multiplies packed 16-bit signed integer values, truncates the 32-bit products to the 18 most signific...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_abs_epi16(__m128i __a)
Computes the absolute value of each of the packed 16-bit signed integers in the source operand and st...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_maddubs_epi16(__m128i __a, __m128i __b)
Multiplies corresponding pairs of packed 8-bit unsigned integer values contained in the first source ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_shuffle_epi8(__m128i __a, __m128i __b)
Copies the 8-bit integers from a 128-bit integer vector to the destination or clears 8-bit values in ...