Linux ip-172-26-2-223 5.4.0-1018-aws #18-Ubuntu SMP Wed Jun 24 01:15:00 UTC 2020 x86_64
Apache
: 172.26.2.223 | : 3.144.82.191
Cant Read [ /etc/named.conf ]
8.1.13
www
www.github.com/MadExploits
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
CPANEL RESET
CREATE WP USER
BLACK DEFEND!
README
+ Create Folder
+ Create File
/
usr /
lib /
gcc /
x86_64-linux-gnu /
9 /
include /
[ HOME SHELL ]
Name
Size
Permission
Action
sanitizer
[ DIR ]
drwxr-xr-x
adxintrin.h
2.76
KB
-rw-r--r--
ammintrin.h
3.14
KB
-rw-r--r--
avx2intrin.h
57.26
KB
-rw-r--r--
avx5124fmapsintrin.h
6.38
KB
-rw-r--r--
avx5124vnniwintrin.h
4.16
KB
-rw-r--r--
avx512bitalgintrin.h
8.64
KB
-rw-r--r--
avx512bwintrin.h
99.13
KB
-rw-r--r--
avx512cdintrin.h
5.69
KB
-rw-r--r--
avx512dqintrin.h
83.37
KB
-rw-r--r--
avx512erintrin.h
12.66
KB
-rw-r--r--
avx512fintrin.h
499.3
KB
-rw-r--r--
avx512ifmaintrin.h
3.35
KB
-rw-r--r--
avx512ifmavlintrin.h
5.26
KB
-rw-r--r--
avx512pfintrin.h
10.05
KB
-rw-r--r--
avx512vbmi2intrin.h
19.35
KB
-rw-r--r--
avx512vbmi2vlintrin.h
36.25
KB
-rw-r--r--
avx512vbmiintrin.h
4.81
KB
-rw-r--r--
avx512vbmivlintrin.h
8.17
KB
-rw-r--r--
avx512vlbwintrin.h
140.48
KB
-rw-r--r--
avx512vldqintrin.h
59.88
KB
-rw-r--r--
avx512vlintrin.h
415.51
KB
-rw-r--r--
avx512vnniintrin.h
4.85
KB
-rw-r--r--
avx512vnnivlintrin.h
8.05
KB
-rw-r--r--
avx512vpopcntdqintrin.h
3.04
KB
-rw-r--r--
avx512vpopcntdqvlintrin.h
4.56
KB
-rw-r--r--
avxintrin.h
49.43
KB
-rw-r--r--
backtrace-supported.h
2.91
KB
-rw-r--r--
backtrace.h
8.87
KB
-rw-r--r--
bmi2intrin.h
3.31
KB
-rw-r--r--
bmiintrin.h
5.5
KB
-rw-r--r--
bmmintrin.h
1.13
KB
-rw-r--r--
cet.h
2.6
KB
-rw-r--r--
cetintrin.h
3.25
KB
-rw-r--r--
cldemoteintrin.h
1.58
KB
-rw-r--r--
clflushoptintrin.h
1.62
KB
-rw-r--r--
clwbintrin.h
1.55
KB
-rw-r--r--
clzerointrin.h
1.46
KB
-rw-r--r--
cpuid.h
8.86
KB
-rw-r--r--
cross-stdarg.h
2.5
KB
-rw-r--r--
emmintrin.h
50.21
KB
-rw-r--r--
f16cintrin.h
3.33
KB
-rw-r--r--
float.h
16.58
KB
-rw-r--r--
fma4intrin.h
8.92
KB
-rw-r--r--
fmaintrin.h
9.88
KB
-rw-r--r--
fxsrintrin.h
2.01
KB
-rw-r--r--
gcov.h
1.36
KB
-rw-r--r--
gfniintrin.h
14.7
KB
-rw-r--r--
ia32intrin.h
7.69
KB
-rw-r--r--
immintrin.h
6.31
KB
-rw-r--r--
iso646.h
1.24
KB
-rw-r--r--
limits.h
5.95
KB
-rw-r--r--
lwpintrin.h
3.32
KB
-rw-r--r--
lzcntintrin.h
2.34
KB
-rw-r--r--
mm3dnow.h
6.91
KB
-rw-r--r--
mm_malloc.h
1.74
KB
-rw-r--r--
mmintrin.h
30.62
KB
-rw-r--r--
movdirintrin.h
2.29
KB
-rw-r--r--
mwaitxintrin.h
1.71
KB
-rw-r--r--
nmmintrin.h
1.26
KB
-rw-r--r--
omp.h
7.26
KB
-rw-r--r--
openacc.h
5.44
KB
-rw-r--r--
pconfigintrin.h
2.29
KB
-rw-r--r--
pkuintrin.h
1.7
KB
-rw-r--r--
pmmintrin.h
4.27
KB
-rw-r--r--
popcntintrin.h
1.71
KB
-rw-r--r--
prfchwintrin.h
1.41
KB
-rw-r--r--
quadmath.h
9.14
KB
-rw-r--r--
quadmath_weak.h
3.13
KB
-rw-r--r--
rdseedintrin.h
1.97
KB
-rw-r--r--
rtmintrin.h
2.67
KB
-rw-r--r--
sgxintrin.h
6.92
KB
-rw-r--r--
shaintrin.h
3.13
KB
-rw-r--r--
smmintrin.h
27.74
KB
-rw-r--r--
stdalign.h
1.18
KB
-rw-r--r--
stdarg.h
3.98
KB
-rw-r--r--
stdatomic.h
9.1
KB
-rw-r--r--
stdbool.h
1.49
KB
-rw-r--r--
stddef.h
12.66
KB
-rw-r--r--
stdfix.h
5.86
KB
-rw-r--r--
stdint-gcc.h
9.24
KB
-rw-r--r--
stdint.h
328
B
-rw-r--r--
stdnoreturn.h
1.11
KB
-rw-r--r--
syslimits.h
330
B
-rw-r--r--
tbmintrin.h
5.12
KB
-rw-r--r--
tmmintrin.h
8.15
KB
-rw-r--r--
unwind.h
10.65
KB
-rw-r--r--
vaesintrin.h
3.39
KB
-rw-r--r--
varargs.h
139
B
-rw-r--r--
vpclmulqdqintrin.h
2.66
KB
-rw-r--r--
waitpkgintrin.h
1.95
KB
-rw-r--r--
wbnoinvdintrin.h
1.58
KB
-rw-r--r--
wmmintrin.h
4.55
KB
-rw-r--r--
x86intrin.h
1.4
KB
-rw-r--r--
xmmintrin.h
41.39
KB
-rw-r--r--
xopintrin.h
27.9
KB
-rw-r--r--
xsavecintrin.h
1.78
KB
-rw-r--r--
xsaveintrin.h
2.41
KB
-rw-r--r--
xsaveoptintrin.h
1.81
KB
-rw-r--r--
xsavesintrin.h
2.11
KB
-rw-r--r--
xtestintrin.h
1.65
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : avx512vbmi2vlintrin.h
/* Copyright (C) 2013-2019 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avx512vbmi2vlintrin.h> directly; include <immintrin.h> instead." 
#endif #ifndef _AVX512VBMI2VLINTRIN_H_INCLUDED #define _AVX512VBMI2VLINTRIN_H_INCLUDED #if !defined(__AVX512VL__) || !defined(__AVX512VBMI2__) #pragma GCC push_options #pragma GCC target("avx512vbmi2,avx512vl") #define __DISABLE_AVX512VBMI2VL__ #endif /* __AVX512VBMIVL__ */ extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_compress_epi8 (__m128i __A, __mmask16 __B, __m128i __C) { return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi)__C, (__v16qi)__A, (__mmask16)__B); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_maskz_compress_epi8 (__mmask16 __A, __m128i __B) { return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __B, (__v16qi) _mm_setzero_si128 (), (__mmask16) __A); } extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_compressstoreu_epi16 (void * __A, __mmask16 __B, __m256i __C) { __builtin_ia32_compressstoreuhi256_mask ((__v16hi *) __A, (__v16hi) __C, (__mmask16) __B); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_compress_epi16 (__m128i __A, __mmask8 __B, __m128i __C) { return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi)__C, (__v8hi)__A, (__mmask8)__B); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_maskz_compress_epi16 (__mmask8 __A, __m128i __B) { return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __B, (__v8hi) _mm_setzero_si128 (), (__mmask8) __A); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_compress_epi16 (__m256i __A, __mmask16 __B, __m256i __C) { return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi)__C, (__v16hi)__A, (__mmask16)__B); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_maskz_compress_epi16 (__mmask16 __A, __m256i __B) { return (__m256i) 
__builtin_ia32_compresshi256_mask ((__v16hi) __B, (__v16hi) _mm256_setzero_si256 (), (__mmask16) __A); } extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_compressstoreu_epi8 (void * __A, __mmask16 __B, __m128i __C) { __builtin_ia32_compressstoreuqi128_mask ((__v16qi *) __A, (__v16qi) __C, (__mmask16) __B); } extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_compressstoreu_epi16 (void * __A, __mmask8 __B, __m128i __C) { __builtin_ia32_compressstoreuhi128_mask ((__v8hi *) __A, (__v8hi) __C, (__mmask8) __B); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_expand_epi8 (__m128i __A, __mmask16 __B, __m128i __C) { return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __C, (__v16qi) __A, (__mmask16) __B); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_maskz_expand_epi8 (__mmask16 __A, __m128i __B) { return (__m128i) __builtin_ia32_expandqi128_maskz ((__v16qi) __B, (__v16qi) _mm_setzero_si128 (), (__mmask16) __A); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_expandloadu_epi8 (__m128i __A, __mmask16 __B, const void * __C) { return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *) __C, (__v16qi) __A, (__mmask16) __B); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_maskz_expandloadu_epi8 (__mmask16 __A, const void * __B) { return (__m128i) __builtin_ia32_expandloadqi128_maskz ((const __v16qi *) __B, (__v16qi) _mm_setzero_si128 (), (__mmask16) __A); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_expand_epi16 (__m128i __A, __mmask8 __B, __m128i __C) { return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __C, (__v8hi) __A, (__mmask8) __B); } extern __inline __m128i __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) _mm_maskz_expand_epi16 (__mmask8 __A, __m128i __B) { return (__m128i) __builtin_ia32_expandhi128_maskz ((__v8hi) __B, (__v8hi) _mm_setzero_si128 (), (__mmask8) __A); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mask_expandloadu_epi16 (__m128i __A, __mmask8 __B, const void * __C) { return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *) __C, (__v8hi) __A, (__mmask8) __B); } extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_maskz_expandloadu_epi16 (__mmask8 __A, const void * __B) { return (__m128i) __builtin_ia32_expandloadhi128_maskz ((const __v8hi *) __B, (__v8hi) _mm_setzero_si128 (), (__mmask8) __A); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_expand_epi16 (__m256i __A, __mmask16 __B, __m256i __C) { return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __C, (__v16hi) __A, (__mmask16) __B); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_maskz_expand_epi16 (__mmask16 __A, __m256i __B) { return (__m256i) __builtin_ia32_expandhi256_maskz ((__v16hi) __B, (__v16hi) _mm256_setzero_si256 (), (__mmask16) __A); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_expandloadu_epi16 (__m256i __A, __mmask16 __B, const void * __C) { return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *) __C, (__v16hi) __A, (__mmask16) __B); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_maskz_expandloadu_epi16 (__mmask16 __A, const void * __B) { return (__m256i) __builtin_ia32_expandloadhi256_maskz ((const __v16hi *) __B, (__v16hi) _mm256_setzero_si256 (), (__mmask16) __A); } #ifdef __OPTIMIZE__ extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_shrdi_epi16 
(__m256i __A, __m256i __B, int __C) { return (__m256i) __builtin_ia32_vpshrd_v16hi ((__v16hi)__A, (__v16hi) __B, __C); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_shrdi_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D, int __E) { return (__m256i)__builtin_ia32_vpshrd_v16hi_mask ((__v16hi)__C, (__v16hi) __D, __E, (__v16hi) __A, (__mmask16)__B); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_maskz_shrdi_epi16 (__mmask16 __A, __m256i __B, __m256i __C, int __D) { return (__m256i)__builtin_ia32_vpshrd_v16hi_mask ((__v16hi)__B, (__v16hi) __C, __D, (__v16hi) _mm256_setzero_si256 (), (__mmask16)__A); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_shrdi_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D, int __E) { return (__m256i)__builtin_ia32_vpshrd_v8si_mask ((__v8si)__C, (__v8si) __D, __E, (__v8si) __A, (__mmask8)__B); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_maskz_shrdi_epi32 (__mmask8 __A, __m256i __B, __m256i __C, int __D) { return (__m256i)__builtin_ia32_vpshrd_v8si_mask ((__v8si)__B, (__v8si) __C, __D, (__v8si) _mm256_setzero_si256 (), (__mmask8)__A); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_shrdi_epi32 (__m256i __A, __m256i __B, int __C) { return (__m256i) __builtin_ia32_vpshrd_v8si ((__v8si)__A, (__v8si) __B, __C); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_shrdi_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D, int __E) { return (__m256i)__builtin_ia32_vpshrd_v4di_mask ((__v4di)__C, (__v4di) __D, __E, (__v4di) __A, (__mmask8)__B); } extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm256_maskz_shrdi_epi64 (__mmask8 __A, __m256i __B, __m256i __C, 
int __D)
{
  return (__m256i)__builtin_ia32_vpshrd_v4di_mask ((__v4di)__B, (__v4di) __C, __D, (__v4di) _mm256_setzero_si256 (), (__mmask8)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdi_epi64 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshrd_v4di ((__v4di)__A, (__v4di) __B, __C);
}

/* 128-bit VPSHRD{W,D} immediate forms.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdi_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, int __E)
{
  return (__m128i)__builtin_ia32_vpshrd_v8hi_mask ((__v8hi)__C, (__v8hi) __D, __E, (__v8hi) __A, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdi_epi16 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshrd_v8hi_mask ((__v8hi)__B, (__v8hi) __C, __D, (__v8hi) _mm_setzero_si128 (), (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdi_epi16 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshrd_v8hi ((__v8hi)__A, (__v8hi) __B, __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdi_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, int __E)
{
  return (__m128i)__builtin_ia32_vpshrd_v4si_mask ((__v4si)__C, (__v4si) __D, __E, (__v4si) __A, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdi_epi32 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshrd_v4si_mask ((__v4si)__B, (__v4si) __C, __D, (__v4si) _mm_setzero_si128 (), (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdi_epi32 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshrd_v4si ((__v4si)__A, (__v4si) __B, __C);
}

extern
__inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdi_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, int __E)
{
  return (__m128i)__builtin_ia32_vpshrd_v2di_mask ((__v2di)__C, (__v2di) __D, __E, (__v2di) __A, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdi_epi64 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshrd_v2di_mask ((__v2di)__B, (__v2di) __C, __D, (__v2di) _mm_setzero_si128 (), (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdi_epi64 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshrd_v2di ((__v2di)__A, (__v2di) __B, __C);
}

/* 256-bit concatenate-and-shift-left-immediate (VPSHLDW/VPSHLDD/VPSHLDQ).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldi_epi16 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshld_v16hi ((__v16hi)__A, (__v16hi) __B, __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldi_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D, int __E)
{
  return (__m256i)__builtin_ia32_vpshld_v16hi_mask ((__v16hi)__C, (__v16hi) __D, __E, (__v16hi) __A, (__mmask16)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldi_epi16 (__mmask16 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshld_v16hi_mask ((__v16hi)__B, (__v16hi) __C, __D, (__v16hi) _mm256_setzero_si256 (), (__mmask16)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldi_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D, int __E)
{
  return (__m256i)__builtin_ia32_vpshld_v8si_mask ((__v8si)__C, (__v8si) __D, __E, (__v8si) __A, (__mmask8)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__,
__always_inline__, __artificial__))
_mm256_maskz_shldi_epi32 (__mmask8 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshld_v8si_mask ((__v8si)__B, (__v8si) __C, __D, (__v8si) _mm256_setzero_si256 (), (__mmask8)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldi_epi32 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshld_v8si ((__v8si)__A, (__v8si) __B, __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldi_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D, int __E)
{
  return (__m256i)__builtin_ia32_vpshld_v4di_mask ((__v4di)__C, (__v4di) __D, __E, (__v4di) __A, (__mmask8)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldi_epi64 (__mmask8 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshld_v4di_mask ((__v4di)__B, (__v4di) __C, __D, (__v4di) _mm256_setzero_si256 (), (__mmask8)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldi_epi64 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshld_v4di ((__v4di)__A, (__v4di) __B, __C);
}

/* 128-bit VPSHLD immediate forms.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldi_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, int __E)
{
  return (__m128i)__builtin_ia32_vpshld_v8hi_mask ((__v8hi)__C, (__v8hi) __D, __E, (__v8hi) __A, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldi_epi16 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshld_v8hi_mask ((__v8hi)__B, (__v8hi) __C, __D, (__v8hi) _mm_setzero_si128 (), (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldi_epi16 (__m128i
__A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshld_v8hi ((__v8hi)__A, (__v8hi) __B, __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldi_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, int __E)
{
  return (__m128i)__builtin_ia32_vpshld_v4si_mask ((__v4si)__C, (__v4si) __D, __E, (__v4si) __A, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldi_epi32 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshld_v4si_mask ((__v4si)__B, (__v4si) __C, __D, (__v4si) _mm_setzero_si128 (), (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldi_epi32 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshld_v4si ((__v4si)__A, (__v4si) __B, __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldi_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, int __E)
{
  return (__m128i)__builtin_ia32_vpshld_v2di_mask ((__v2di)__C, (__v2di) __D, __E, (__v2di) __A, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldi_epi64 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshld_v2di_mask ((__v2di)__B, (__v2di) __C, __D, (__v2di) _mm_setzero_si128 (), (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldi_epi64 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshld_v2di ((__v2di)__A, (__v2di) __B, __C);
}

#else
/* Macro forms of the same immediate-operand intrinsics (selected by the
   #if branch opened above this excerpt) so the immediate reaches the
   builtin as a literal.  */
#define _mm256_shrdi_epi16(A, B, C) \
  ((__m256i) __builtin_ia32_vpshrd_v16hi ((__v16hi)(__m256i)(A), \
                                          (__v16hi)(__m256i)(B),(int)(C)))
#define _mm256_mask_shrdi_epi16(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(C), \
                                               (__v16hi)(__m256i)(D), \
                                               (int)(E), \
                                               (__v16hi)(__m256i)(A), \
                                               (__mmask16)(B)))
#define _mm256_maskz_shrdi_epi16(A, B, C, D) \
  ((__m256i) \
   __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(B), \
                                     (__v16hi)(__m256i)(C),(int)(D), \
                                     (__v16hi)(__m256i)_mm256_setzero_si256 (), \
                                     (__mmask16)(A)))
#define _mm256_shrdi_epi32(A, B, C) \
  ((__m256i) __builtin_ia32_vpshrd_v8si ((__v8si)(__m256i)(A), \
                                         (__v8si)(__m256i)(B),(int)(C)))
#define _mm256_mask_shrdi_epi32(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(C), \
                                              (__v8si)(__m256i)(D), \
                                              (int)(E), \
                                              (__v8si)(__m256i)(A), \
                                              (__mmask8)(B)))
#define _mm256_maskz_shrdi_epi32(A, B, C, D) \
  ((__m256i) \
   __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(B), \
                                    (__v8si)(__m256i)(C),(int)(D), \
                                    (__v8si)(__m256i)_mm256_setzero_si256 (), \
                                    (__mmask8)(A)))
#define _mm256_shrdi_epi64(A, B, C) \
  ((__m256i) __builtin_ia32_vpshrd_v4di ((__v4di)(__m256i)(A), \
                                         (__v4di)(__m256i)(B),(int)(C)))
#define _mm256_mask_shrdi_epi64(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(C), \
                                              (__v4di)(__m256i)(D), (int)(E), \
                                              (__v4di)(__m256i)(A), \
                                              (__mmask8)(B)))
#define _mm256_maskz_shrdi_epi64(A, B, C, D) \
  ((__m256i) \
   __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(B), \
                                    (__v4di)(__m256i)(C),(int)(D), \
                                    (__v4di)(__m256i)_mm256_setzero_si256 (), \
                                    (__mmask8)(A)))
/* 128-bit VPSHRD macro forms.  */
#define _mm_shrdi_epi16(A, B, C) \
  ((__m128i) __builtin_ia32_vpshrd_v8hi ((__v8hi)(__m128i)(A), \
                                         (__v8hi)(__m128i)(B),(int)(C)))
#define _mm_mask_shrdi_epi16(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(C), \
                                              (__v8hi)(__m128i)(D), (int)(E), \
                                              (__v8hi)(__m128i)(A), \
                                              (__mmask8)(B)))
#define _mm_maskz_shrdi_epi16(A, B, C, D) \
  ((__m128i) \
   __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(B), \
                                    (__v8hi)(__m128i)(C),(int)(D), \
                                    (__v8hi)(__m128i)_mm_setzero_si128 (), \
                                    (__mmask8)(A)))
#define _mm_shrdi_epi32(A, B, C) \
  ((__m128i) __builtin_ia32_vpshrd_v4si ((__v4si)(__m128i)(A), \
                                         (__v4si)(__m128i)(B),(int)(C)))
#define _mm_mask_shrdi_epi32(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(C), \
                                              (__v4si)(__m128i)(D), (int)(E), \
                                              (__v4si)(__m128i)(A), \
                                              (__mmask8)(B)))
#define _mm_maskz_shrdi_epi32(A, B, C, D) \
  ((__m128i) \
   __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(B), \
                                    (__v4si)(__m128i)(C),(int)(D), \
                                    (__v4si)(__m128i)_mm_setzero_si128 (), \
                                    (__mmask8)(A)))
#define _mm_shrdi_epi64(A, B, C) \
  ((__m128i) __builtin_ia32_vpshrd_v2di ((__v2di)(__m128i)(A), \
                                         (__v2di)(__m128i)(B),(int)(C)))
#define _mm_mask_shrdi_epi64(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(C), \
                                              (__v2di)(__m128i)(D), (int)(E), \
                                              (__v2di)(__m128i)(A), \
                                              (__mmask8)(B)))
#define _mm_maskz_shrdi_epi64(A, B, C, D) \
  ((__m128i) \
   __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(B), \
                                    (__v2di)(__m128i)(C),(int)(D), \
                                    (__v2di)(__m128i)_mm_setzero_si128 (), \
                                    (__mmask8)(A)))
/* VPSHLD (shift-left) macro forms, 256-bit.  */
#define _mm256_shldi_epi16(A, B, C) \
  ((__m256i) __builtin_ia32_vpshld_v16hi ((__v16hi)(__m256i)(A), \
                                          (__v16hi)(__m256i)(B),(int)(C)))
#define _mm256_mask_shldi_epi16(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(C), \
                                               (__v16hi)(__m256i)(D), \
                                               (int)(E), \
                                               (__v16hi)(__m256i)(A), \
                                               (__mmask16)(B)))
#define _mm256_maskz_shldi_epi16(A, B, C, D) \
  ((__m256i) \
   __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(B), \
                                     (__v16hi)(__m256i)(C),(int)(D), \
                                     (__v16hi)(__m256i)_mm256_setzero_si256 (), \
                                     (__mmask16)(A)))
#define _mm256_shldi_epi32(A, B, C) \
  ((__m256i) __builtin_ia32_vpshld_v8si ((__v8si)(__m256i)(A), \
                                         (__v8si)(__m256i)(B),(int)(C)))
#define _mm256_mask_shldi_epi32(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(C), \
                                              (__v8si)(__m256i)(D), (int)(E), \
                                              (__v8si)(__m256i)(A), \
                                              (__mmask8)(B)))
#define _mm256_maskz_shldi_epi32(A, B, C, D) \
  ((__m256i) \
   __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(B), \
                                    (__v8si)(__m256i)(C),(int)(D), \
                                    (__v8si)(__m256i)_mm256_setzero_si256 (), \
                                    (__mmask8)(A)))
#define _mm256_shldi_epi64(A, B, C) \
  ((__m256i) __builtin_ia32_vpshld_v4di ((__v4di)(__m256i)(A), \
                                         (__v4di)(__m256i)(B),(int)(C)))
#define _mm256_mask_shldi_epi64(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(C), \
                                              (__v4di)(__m256i)(D), (int)(E), \
                                              (__v4di)(__m256i)(A), \
                                              (__mmask8)(B)))
#define _mm256_maskz_shldi_epi64(A, B, C, D) \
  ((__m256i) \
   __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(B), \
                                    (__v4di)(__m256i)(C),(int)(D), \
                                    (__v4di)(__m256i)_mm256_setzero_si256 (), \
                                    (__mmask8)(A)))
/* VPSHLD macro forms, 128-bit.  */
#define _mm_shldi_epi16(A, B, C) \
  ((__m128i) __builtin_ia32_vpshld_v8hi ((__v8hi)(__m128i)(A), \
                                         (__v8hi)(__m128i)(B),(int)(C)))
#define _mm_mask_shldi_epi16(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(C), \
                                              (__v8hi)(__m128i)(D), (int)(E), \
                                              (__v8hi)(__m128i)(A), \
                                              (__mmask8)(B)))
#define _mm_maskz_shldi_epi16(A, B, C, D) \
  ((__m128i) \
   __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(B), \
                                    (__v8hi)(__m128i)(C),(int)(D), \
                                    (__v8hi)(__m128i)_mm_setzero_si128 (), \
                                    (__mmask8)(A)))
#define _mm_shldi_epi32(A, B, C) \
  ((__m128i) __builtin_ia32_vpshld_v4si ((__v4si)(__m128i)(A), \
                                         (__v4si)(__m128i)(B),(int)(C)))
#define _mm_mask_shldi_epi32(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(C), \
                                              (__v4si)(__m128i)(D), (int)(E), \
                                              (__v4si)(__m128i)(A), \
                                              (__mmask8)(B)))
#define _mm_maskz_shldi_epi32(A, B, C, D) \
  ((__m128i) \
   __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(B), \
                                    (__v4si)(__m128i)(C),(int)(D), \
                                    (__v4si)(__m128i)_mm_setzero_si128 (), \
                                    (__mmask8)(A)))
#define _mm_shldi_epi64(A, B, C) \
  ((__m128i) __builtin_ia32_vpshld_v2di ((__v2di)(__m128i)(A), \
                                         (__v2di)(__m128i)(B),(int)(C)))
#define _mm_mask_shldi_epi64(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(C), \
                                              (__v2di)(__m128i)(D), (int)(E), \
                                              (__v2di)(__m128i)(A), \
                                              (__mmask8)(B)))
#define _mm_maskz_shldi_epi64(A, B, C, D) \
  ((__m128i) \
   __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(B), \
                                    (__v2di)(__m128i)(C),(int)(D), \
                                    (__v2di)(__m128i)_mm_setzero_si128 (), \
                                    (__mmask8)(A)))
#endif

/* Variable-count double shift right (VPSHRDVW/VPSHRDVD/VPSHRDVQ): per-lane
   shift counts are taken from the last vector operand.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdv_epi16 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshrdv_v16hi ((__v16hi)__A, (__v16hi) __B, (__v16hi) __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdv_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v16hi_mask ((__v16hi)__A, (__v16hi) __C, (__v16hi) __D, (__mmask16)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdv_epi16 (__mmask16 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v16hi_maskz ((__v16hi)__B, (__v16hi) __C, (__v16hi) __D, (__mmask16)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdv_epi32 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshrdv_v8si ((__v8si)__A, (__v8si) __B, (__v8si) __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdv_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v8si_mask ((__v8si)__A, (__v8si) __C, (__v8si) __D, (__mmask8)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdv_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v8si_maskz ((__v8si)__B, (__v8si) __C, (__v8si) __D, (__mmask8)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdv_epi64 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshrdv_v4di
  ((__v4di)__A, (__v4di) __B, (__v4di) __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdv_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v4di_mask ((__v4di)__A, (__v4di) __C, (__v4di) __D, (__mmask8)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdv_epi64 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v4di_maskz ((__v4di)__B, (__v4di) __C, (__v4di) __D, (__mmask8)__A);
}

/* 128-bit variable-count double shift right.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdv_epi16 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshrdv_v8hi ((__v8hi)__A, (__v8hi) __B, (__v8hi) __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdv_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v8hi_mask ((__v8hi)__A, (__v8hi) __C, (__v8hi) __D, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdv_epi16 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v8hi_maskz ((__v8hi)__B, (__v8hi) __C, (__v8hi) __D, (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdv_epi32 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshrdv_v4si ((__v4si)__A, (__v4si) __B, (__v4si) __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdv_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v4si_mask ((__v4si)__A, (__v4si) __C, (__v4si) __D, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__,
__artificial__))
_mm_maskz_shrdv_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v4si_maskz ((__v4si)__B, (__v4si) __C, (__v4si) __D, (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdv_epi64 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshrdv_v2di ((__v2di)__A, (__v2di) __B, (__v2di) __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdv_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v2di_mask ((__v2di)__A, (__v2di) __C, (__v2di) __D, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdv_epi64 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v2di_maskz ((__v2di)__B, (__v2di) __C, (__v2di) __D, (__mmask8)__A);
}

/* Variable-count double shift left (VPSHLDVW/VPSHLDVD/VPSHLDVQ), 256-bit.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldv_epi16 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshldv_v16hi ((__v16hi)__A, (__v16hi) __B, (__v16hi) __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldv_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v16hi_mask ((__v16hi)__A, (__v16hi) __C, (__v16hi) __D, (__mmask16)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldv_epi16 (__mmask16 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v16hi_maskz ((__v16hi)__B, (__v16hi) __C, (__v16hi) __D, (__mmask16)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldv_epi32 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)
    __builtin_ia32_vpshldv_v8si ((__v8si)__A, (__v8si) __B, (__v8si) __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldv_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v8si_mask ((__v8si)__A, (__v8si) __C, (__v8si) __D, (__mmask8)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldv_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v8si_maskz ((__v8si)__B, (__v8si) __C, (__v8si) __D, (__mmask8)__A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldv_epi64 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshldv_v4di ((__v4di)__A, (__v4di) __B, (__v4di) __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldv_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v4di_mask ((__v4di)__A, (__v4di) __C, (__v4di) __D, (__mmask8)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldv_epi64 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v4di_maskz ((__v4di)__B, (__v4di) __C, (__v4di) __D, (__mmask8)__A);
}

/* 128-bit variable-count double shift left.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldv_epi16 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshldv_v8hi ((__v8hi)__A, (__v8hi) __B, (__v8hi) __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldv_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v8hi_mask ((__v8hi)__A, (__v8hi) __C, (__v8hi) __D, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldv_epi16 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v8hi_maskz ((__v8hi)__B, (__v8hi) __C, (__v8hi) __D, (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldv_epi32 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshldv_v4si ((__v4si)__A, (__v4si) __B, (__v4si) __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldv_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v4si_mask ((__v4si)__A, (__v4si) __C, (__v4si) __D, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldv_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v4si_maskz ((__v4si)__B, (__v4si) __C, (__v4si) __D, (__mmask8)__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldv_epi64 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshldv_v2di ((__v2di)__A, (__v2di) __B, (__v2di) __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldv_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v2di_mask ((__v2di)__A, (__v2di) __C, (__v2di) __D, (__mmask8)__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldv_epi64 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v2di_maskz ((__v2di)__B, (__v2di) __C, (__v2di) __D, (__mmask8)__A);
}

#ifdef __DISABLE_AVX512VBMI2VL__
#undef __DISABLE_AVX512VBMI2VL__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMI2VL__ */

/* The byte compress/expand intrinsics below additionally need AVX512BW
   (they use 32-lane __mmask32 masks).  */
#if !defined(__AVX512VL__) \
    || !defined(__AVX512VBMI2__) || \
    !defined(__AVX512BW__)
#pragma GCC push_options
#pragma GCC target("avx512vbmi2,avx512vl,avx512bw")
#define __DISABLE_AVX512VBMI2VLBW__
#endif /* __AVX512VBMI2VLBW__ */

/* 256-bit masked byte compress/expand (VPCOMPRESSB/VPEXPANDB) plus their
   store/load memory forms.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_compress_epi8 (__m256i __A, __mmask32 __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi)__C, (__v32qi)__A, (__mmask32)__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_compress_epi8 (__mmask32 __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __B, (__v32qi) _mm256_setzero_si256 (), (__mmask32) __A);
}

/* NOTE(review): unaligned store form; __A is presumably allowed to be an
   arbitrary byte address -- confirm against the builtin's definition.  */
extern __inline void
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_compressstoreu_epi8 (void * __A, __mmask32 __B, __m256i __C)
{
  __builtin_ia32_compressstoreuqi256_mask ((__v32qi *) __A, (__v32qi) __C, (__mmask32) __B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_expand_epi8 (__m256i __A, __mmask32 __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __C, (__v32qi) __A, (__mmask32) __B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_expand_epi8 (__mmask32 __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_expandqi256_maskz ((__v32qi) __B, (__v32qi) _mm256_setzero_si256 (), (__mmask32) __A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_expandloadu_epi8 (__m256i __A, __mmask32 __B, const void * __C)
{
  return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *) __C, (__v32qi) __A, (__mmask32) __B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_expandloadu_epi8 (__mmask32 __A, const void * __B)
{
  return (__m256i)
    __builtin_ia32_expandloadqi256_maskz ((const __v32qi *) __B, (__v32qi) _mm256_setzero_si256 (), (__mmask32) __A);
}

#ifdef __DISABLE_AVX512VBMI2VLBW__
#undef __DISABLE_AVX512VBMI2VLBW__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMI2VLBW__ */

#endif /* _AVX512VBMIVLINTRIN_H_INCLUDED */
Close