GP-5725: Corrected operands for several AVX512 instructions

ghidorahrex 2025-06-10 09:21:39 -04:00
parent 92f2588895
commit b0750c2783
4 changed files with 191 additions and 130 deletions
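
Across the four files below, the affected AVX and AVX-512 constructors gain the trailing imm8 operand that was missing from both their display and their semantics, VPTEST/VTESTPS/VTESTPD get concrete flag semantics in place of pcodeop stubs, VPEXTRB/VPEXTRD are split into register and memory forms, and the VSIB address subtables now export the computed address. A representative corrected form (illustrative, not quoted from the diff):

# VPCMPB k1, xmm2, xmm3, 4    ; imm8 = 4 selects the NEQ predicate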


@@ -1766,17 +1766,34 @@ define pcodeop vpcmpistrm_avx ;
# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72322
define pcodeop vpextrb_avx ;
:VPEXTRB Reg32_m8, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x14; XmmReg1 ... & Reg32_m8; imm8
:VPEXTRB Rmr32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x14; mod=3 & XmmReg1 & Rmr32 & check_Rmr32_dest; imm8
{
Reg32_m8 = vpextrb_avx( XmmReg1, imm8:1 );
# TODO Reg64 = zext(Reg32)
local tmp8:1 = imm8;
local tmp = XmmReg1 >> (tmp8[0,4]*8);
Rmr32 = zext(tmp[0,8]);
build check_Rmr32_dest;
}
:VPEXTRB m8, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x14; (XmmReg1 ... & m8); imm8
{
local tmp8:1 = imm8;
local tmp = XmmReg1 >> (tmp8[0,4]*8);
m8 = tmp[0,8];
}
# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72326
define pcodeop vpextrd_avx ;
:VPEXTRD rm32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x16; XmmReg1 ... & rm32; imm8
:VPEXTRD Rmr32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x16; mod=3 & XmmReg1 & Rmr32 & check_Rmr32_dest; imm8
{
rm32 = vpextrd_avx( XmmReg1, imm8:1 );
local tmp8:1 = imm8;
local tmp = XmmReg1 >> (tmp8[0,2]*32);
Rmr32 = tmp(0);
build check_Rmr32_dest;
}
:VPEXTRD m32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x16; XmmReg1 ... & m32; imm8
{
local tmp8:1 = imm8;
local tmp = XmmReg1 >> (tmp8[0,2]*32);
m32 = tmp(0);
}
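A worked example of the inline extraction semantics above (illustrative values, not part of the diff): the immediate is masked to the element count, then converted to a bit shift.

# VPEXTRB EAX, XMM1, 0x5  : index = 5 & 0xF = 5, shift = 5*8 = 40, EAX = zext(XMM1[40,8])
# VPEXTRD EAX, XMM1, 0x2  : index = 2 & 0x3 = 2, shift = 2*32 = 64, EAX = XMM1[64,32]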
# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72330
@@ -2423,15 +2440,27 @@ define pcodeop vpsubusw_avx ;
define pcodeop vptest_avx ;
:VPTEST XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x17; XmmReg1 ... & XmmReg2_m128
{
vptest_avx( XmmReg1, XmmReg2_m128 );
# TODO set flags AF, CF, PF, SF, ZF
local val1 = XmmReg2_m128;
local val2 = XmmReg1;
ZF = (val1 & val2) == 0;
CF = (val1 & ~val2) == 0;
AF = 0;
OF = 0;
PF = 0;
SF = 0;
}
# PTEST 4-487 PAGE 1607 LINE 83730
:VPTEST YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x17; YmmReg1 ... & YmmReg2_m256
{
vptest_avx( YmmReg1, YmmReg2_m256 );
# TODO set flags AF, CF, PF, SF, ZF
local val1 = YmmReg2_m256;
local val2 = YmmReg1;
ZF = (val1 & val2) == 0;
CF = (val1 & ~val2) == 0;
AF = 0;
OF = 0;
PF = 0;
SF = 0;
}
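Both forms implement the manual's PTEST flag behavior: ZF is set when DEST AND SRC is all zeros, CF when SRC AND NOT DEST is all zeros, and the remaining flags are cleared. A sketch with assumed operand values:

# VPTEST XMM1, XMM2 with XMM1 = 0x0F, XMM2 = 0xF0 : XMM1 & XMM2 == 0   => ZF = 1
# VPTEST XMM1, XMM2 with XMM1 = 0xFF, XMM2 = 0x0F : XMM2 & ~XMM1 == 0  => CF = 1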
# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83929
@@ -3007,30 +3036,62 @@ define pcodeop vpermilps_avx ;
define pcodeop vtestps_avx ;
:VTESTPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0E; XmmReg1 ... & XmmReg2_m128
{
vtestps_avx( XmmReg1, XmmReg2_m128 );
# TODO set flags AF, CF, PF, SF, ZF
local val1 = XmmReg2_m128;
local val2 = XmmReg1;
local ztest = val1 & val2;
ZF = (ztest[31,1] | ztest[63,1] | ztest[95,1] | ztest[127,1]) == 0;
local ctest = val1 & ~val2;
CF = (ctest[31,1] | ctest[63,1] | ctest[95,1] | ctest[127,1]) == 0;
AF = 0;
OF = 0;
PF = 0;
SF = 0;
}
# VTESTPD/VTESTPS 5-560 PAGE 2384 LINE 122260
:VTESTPS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0E; YmmReg1 ... & YmmReg2_m256
{
vtestps_avx( YmmReg1, YmmReg2_m256 );
# TODO set flags AF, CF, PF, SF, ZF
local val1 = YmmReg2_m256;
local val2 = YmmReg1;
local ztest = val1 & val2;
ZF = (ztest[31,1] | ztest[63,1] | ztest[95,1] | ztest[127,1] | ztest[159,1] | ztest[191,1] | ztest[223,1] | ztest[255,1]) == 0;
local ctest = val1 & ~val2;
CF = (ctest[31,1] | ctest[63,1] | ctest[95,1] | ctest[127,1] | ctest[159,1] | ctest[191,1] | ctest[223,1] | ctest[255,1]) == 0;
AF = 0;
OF = 0;
PF = 0;
SF = 0;
}
# VTESTPD/VTESTPS 5-560 PAGE 2384 LINE 122263
define pcodeop vtestpd_avx ;
:VTESTPD XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0F; XmmReg1 ... & XmmReg2_m128
{
vtestpd_avx( XmmReg1, XmmReg2_m128 );
# TODO set flags AF, CF, PF, SF, ZF
local val1 = XmmReg2_m128;
local val2 = XmmReg1;
local ztest = val1 & val2;
ZF = (ztest[63,1] | ztest[127,1]) == 0;
local ctest = val1 & ~val2;
CF = (ctest[63,1] | ctest[127,1]) == 0;
AF = 0;
OF = 0;
PF = 0;
SF = 0;
}
# VTESTPD/VTESTPS 5-560 PAGE 2384 LINE 122266
:VTESTPD YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0F; YmmReg1 ... & YmmReg2_m256
{
vtestpd_avx( YmmReg1, YmmReg2_m256 );
# TODO set flags AF, CF, PF, SF, ZF
local val1 = YmmReg2_m256;
local val2 = YmmReg1;
local ztest = val1 & val2;
ZF = (ztest[63,1] | ztest[127,1] | ztest[191,1] | ztest[255,1]) == 0;
local ctest = val1 & ~val2;
CF = (ctest[63,1] | ctest[127,1] | ctest[191,1] | ctest[255,1]) == 0;
AF = 0;
OF = 0;
PF = 0;
SF = 0;
}
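Unlike VPTEST, which tests every bit, VTESTPS/VTESTPD test only the sign bit of each packed element, so the reductions above OR together bit 32k+31 of each dword (VTESTPS) or bit 64k+63 of each qword (VTESTPD); for the 256-bit forms those are bits 31, 63, 95, 127, 159, 191, 223, 255 and bits 63, 127, 191, 255 respectively.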
# XORPD 5-596 PAGE 2420 LINE 123828


@@ -6264,10 +6264,10 @@ define pcodeop vunpcklps_avx512f ;
# VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94615
define pcodeop valignd_avx512vl ;
:VALIGND XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x03; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
:VALIGND XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x03; ((XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst); imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
XmmResult = valignd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
XmmResult = valignd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 );
XmmMask = XmmReg1;
build XmmOpMask32;
ZmmReg1 = zext(XmmResult);
@@ -6275,30 +6275,30 @@ define pcodeop valignd_avx512vl ;
# VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94619
define pcodeop valignq_avx512vl ;
:VALIGNQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x03; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
:VALIGNQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x03; ((XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst); imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
XmmResult = valignq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
XmmResult = valignq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
XmmMask = XmmReg1;
build XmmOpMask64;
ZmmReg1 = zext(XmmResult);
}
# VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94623
:VALIGND YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x03; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
:VALIGND YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x03; ((YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst); imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
YmmResult = valignd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
YmmResult = valignd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
YmmMask = YmmReg1;
build YmmOpMask32;
ZmmReg1 = zext(YmmResult);
}
# VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94627
:VALIGNQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x03; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
:VALIGNQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x03; ((YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst); imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
YmmResult = valignq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
YmmResult = valignq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
YmmMask = YmmReg1;
build YmmOpMask64;
ZmmReg1 = zext(YmmResult);
@@ -6306,10 +6306,10 @@ define pcodeop valignq_avx512vl ;
# VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94631
define pcodeop valignd_avx512f ;
:VALIGND ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x03; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
:VALIGND ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x03; ((ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst); imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
ZmmResult = valignd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
ZmmResult = valignd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
ZmmMask = ZmmReg1;
build ZmmOpMask32;
ZmmReg1 = ZmmResult;
@@ -6317,10 +6317,10 @@ define pcodeop valignd_avx512f ;
# VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94635
define pcodeop valignq_avx512f ;
:VALIGNQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x03; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
:VALIGNQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x03; ((ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst); imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
ZmmResult = valignq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
ZmmResult = valignq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
ZmmMask = ZmmReg1;
build ZmmOpMask64;
ZmmReg1 = ZmmResult;
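For reference, the imm8 these constructors now pick up is an element-granular shift count: VALIGND/VALIGNQ concatenate the two sources and shift the pair right by imm8 dwords or qwords, keeping the low vector-length bits. An illustrative sketch, assuming unmasked operation:

# VALIGND ZMM1, ZMM2, ZMM3, 3  =>  ZMM1 = ((ZMM2 : ZMM3) >> 3*32)[0,512]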
@@ -10502,53 +10502,53 @@ define pcodeop vbroadcasti64x4_avx512f ;
# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111259
define pcodeop vpcmpb_avx512vl ;
:VPCMPB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128
:VPCMPB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
local tmp = vpcmpb_avx512vl( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}
# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111263
:VPCMPB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ... & YmmReg2_m256
:VPCMPB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ... & YmmReg2_m256; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
local tmp = vpcmpb_avx512vl( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}
# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111267
define pcodeop vpcmpb_avx512bw ;
:VPCMPB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x3F; KReg_reg ... & ZmmReg2_m512
:VPCMPB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x3F; KReg_reg ... & ZmmReg2_m512; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
local tmp = vpcmpb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
KReg_reg = zext(AVXOpMask[0,64]) & tmp;
}
# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111271
define pcodeop vpcmpub_avx512vl ;
:VPCMPUB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128
:VPCMPUB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpub_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
local tmp = vpcmpub_avx512vl( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}
# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111275
:VPCMPUB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256
:VPCMPUB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpub_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
local tmp = vpcmpub_avx512vl( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}
# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111279
define pcodeop vpcmpub_avx512bw ;
:VPCMPUB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x3E; KReg_reg ... & ZmmReg2_m512
:VPCMPUB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x3E; KReg_reg ... & ZmmReg2_m512; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpub_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
local tmp = vpcmpub_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
KReg_reg = zext(AVXOpMask[0,64]) & tmp;
}
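The imm8 forwarded to these compares selects the predicate defined in the manual (0 EQ, 1 LT, 2 LE, 3 FALSE, 4 NEQ, 5 NLT, 6 NLE, 7 TRUE), with VPCMPB comparing signed bytes and VPCMPUB unsigned; each byte lane yields one bit of the destination mask register.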
@@ -10659,53 +10659,53 @@ define pcodeop vpcmpuq_avx512f ;
# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111724
define pcodeop vpcmpw_avx512vl ;
:VPCMPW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128
:VPCMPW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
local tmp = vpcmpw_avx512vl( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}
# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111728
:VPCMPW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ... & YmmReg2_m256
:VPCMPW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ... & YmmReg2_m256; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
local tmp = vpcmpw_avx512vl( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}
# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111732
define pcodeop vpcmpw_avx512bw ;
:VPCMPW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x3F; KReg_reg ... & ZmmReg2_m512
:VPCMPW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x3F; KReg_reg ... & ZmmReg2_m512; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
local tmp = vpcmpw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}
# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111736
define pcodeop vpcmpuw_avx512vl ;
:VPCMPUW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128
:VPCMPUW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
local tmp = vpcmpuw_avx512vl( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}
# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111740
:VPCMPUW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256
:VPCMPUW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
local tmp = vpcmpuw_avx512vl( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}
# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111745
define pcodeop vpcmpuw_avx512bw ;
:VPCMPUW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x3E; KReg_reg ... & ZmmReg2_m512
:VPCMPUW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x3E; KReg_reg ... & ZmmReg2_m512; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
local tmp = vpcmpuw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
local tmp = vpcmpuw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}
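The varying AVXOpMask slices track the element counts: a 128-bit source holds 8 words, a 256-bit source 16, and a 512-bit source 32, so the writemask is truncated with [0,8], [0,16], or [0,32] before being ANDed with the comparison result (the byte compares above use [0,16] through [0,64] for the same reason).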
@@ -13345,10 +13345,10 @@ define pcodeop vscatterpf1qpd_avx512pf ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 121994
define pcodeop vshuff32x4_avx512vl ;
:VSHUFF32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
:VSHUFF32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
YmmResult = vshuff32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
YmmResult = vshuff32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
YmmMask = YmmReg1;
build YmmOpMask32;
ZmmReg1 = zext(YmmResult);
@@ -13356,10 +13356,10 @@ define pcodeop vshuff32x4_avx512vl ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 121998
define pcodeop vshuff32x4_avx512f ;
:VSHUFF32x4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x23; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
:VSHUFF32x4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x23; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
ZmmResult = vshuff32x4_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
ZmmResult = vshuff32x4_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
ZmmMask = ZmmReg1;
build ZmmOpMask32;
ZmmReg1 = ZmmResult;
@@ -13367,10 +13367,10 @@ define pcodeop vshuff32x4_avx512f ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122002
define pcodeop vshuff64x2_avx512vl ;
:VSHUFF64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
:VSHUFF64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
YmmResult = vshuff64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
YmmResult = vshuff64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
YmmMask = YmmReg1;
build YmmOpMask64;
ZmmReg1 = zext(YmmResult);
@@ -13378,10 +13378,10 @@ define pcodeop vshuff64x2_avx512vl ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122006
define pcodeop vshuff64x2_avx512f ;
:VSHUFF64x2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x23; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
:VSHUFF64x2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x23; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
ZmmResult = vshuff64x2_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
ZmmResult = vshuff64x2_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
ZmmMask = ZmmReg1;
build ZmmOpMask64;
ZmmReg1 = ZmmResult;
@@ -13389,10 +13389,10 @@ define pcodeop vshuff64x2_avx512f ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122010
define pcodeop vshufi32x4_avx512vl ;
:VSHUFI32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
:VSHUFI32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
YmmResult = vshufi32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
YmmResult = vshufi32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
YmmMask = YmmReg1;
build YmmOpMask32;
ZmmReg1 = zext(YmmResult);
@@ -13400,10 +13400,10 @@ define pcodeop vshufi32x4_avx512vl ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122013
define pcodeop vshufi32x4_avx512f ;
:VSHUFI32x4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x43; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
:VSHUFI32x4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x43; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
ZmmResult = vshufi32x4_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
ZmmResult = vshufi32x4_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
ZmmMask = ZmmReg1;
build ZmmOpMask32;
ZmmReg1 = ZmmResult;
@@ -13411,10 +13411,10 @@ define pcodeop vshufi32x4_avx512f ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122016
define pcodeop vshufi64x2_avx512vl ;
:VSHUFI64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
:VSHUFI64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
YmmResult = vshufi64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
YmmResult = vshufi64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
YmmMask = YmmReg1;
build YmmOpMask64;
ZmmReg1 = zext(YmmResult);
@@ -13422,10 +13422,10 @@ define pcodeop vshufi64x2_avx512vl ;
# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122019
define pcodeop vshufi64x2_avx512f ;
:VSHUFI64x2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x43; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
:VSHUFI64x2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x43; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
ZmmResult = vshufi64x2_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
ZmmResult = vshufi64x2_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
ZmmMask = ZmmReg1;
build ZmmOpMask64;
ZmmReg1 = ZmmResult;
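In the VSHUF forms the imm8 selects 128-bit blocks rather than individual elements: each 2-bit field of the immediate picks a source block for the corresponding destination block. An illustrative example, assuming no masking:

# VSHUFF32X4 ZMM1, ZMM2, ZMM3, 0x4E  =>  ZMM1 = blocks 2,3 of ZMM2, then blocks 0,1 of ZMM3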


@@ -1167,87 +1167,87 @@ m80fp: "extended double ptr" Mem is Mem { export *:10 Mem; }
## VSIB
##
vaddr32x: [Base + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base & ss { }
vaddr32x: [Xmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Xmm_vsib & base=5 & ss; simm32_32 { }
vaddr32x: [Base + Xmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Xmm_vsib & Base & ss; simm8_32 { }
vaddr32x: [Base + Xmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Xmm_vsib & Base & ss; simm32_32 { }
vaddr32x: [Base + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base & ss { local tmp=zext(Base)+Xmm_vsib*ss; export tmp; }
vaddr32x: [Xmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Xmm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Xmm_vsib*ss; export tmp; }
vaddr32x: [Base + Xmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Xmm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Xmm_vsib*ss; export tmp; }
vaddr32x: [Base + Xmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Xmm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Xmm_vsib*ss; export tmp; }
vaddr32y: [Base + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base & ss { }
vaddr32y: [Ymm_vsib*ss + simm32_32] is mod=0 & r_m=4; Ymm_vsib & base=5 & ss; simm32_32 { }
vaddr32y: [Base + Ymm_vsib*ss + simm8_32] is mod=1 & r_m=4; Ymm_vsib & Base & ss; simm8_32 { }
vaddr32y: [Base + Ymm_vsib*ss + simm32_32] is mod=2 & r_m=4; Ymm_vsib & Base & ss; simm32_32 { }
vaddr32y: [Base + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base & ss { local tmp=zext(Base)+Ymm_vsib*ss; export tmp; }
vaddr32y: [Ymm_vsib*ss + simm32_32] is mod=0 & r_m=4; Ymm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Ymm_vsib*ss; export tmp; }
vaddr32y: [Base + Ymm_vsib*ss + simm8_32] is mod=1 & r_m=4; Ymm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Ymm_vsib*ss; export tmp; }
vaddr32y: [Base + Ymm_vsib*ss + simm32_32] is mod=2 & r_m=4; Ymm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Ymm_vsib*ss; export tmp; }
vaddr32z: [Base + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base & ss { }
vaddr32z: [Zmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Zmm_vsib & base=5 & ss; simm32_32 { }
vaddr32z: [Base + Zmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Zmm_vsib & Base & ss; simm8_32 { }
vaddr32z: [Base + Zmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Zmm_vsib & Base & ss; simm32_32 { }
vaddr32z: [Base + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base & ss { local tmp=zext(Base)+Zmm_vsib*ss; export tmp; }
vaddr32z: [Zmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Zmm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Zmm_vsib*ss; export tmp; }
vaddr32z: [Base + Zmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Zmm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Zmm_vsib*ss; export tmp; }
vaddr32z: [Base + Zmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Zmm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Zmm_vsib*ss; export tmp; }
@ifdef IA64
vaddr64x: [Base64 + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base64 & ss { }
vaddr64x: [Xmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Xmm_vsib & base64=5 & ss; simm32_64 { }
vaddr64x: [Base64 + Xmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Xmm_vsib & Base64 & ss; simm8_64 { }
vaddr64x: [Base64 + Xmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Xmm_vsib & Base64 & ss; simm32_64 { }
vaddr64x: [Base64 + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base64 & ss { local tmp=zext(Base64)+Xmm_vsib*ss; export tmp; }
vaddr64x: [Xmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Xmm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Xmm_vsib*ss; export tmp; }
vaddr64x: [Base64 + Xmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Xmm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Xmm_vsib*ss; export tmp; }
vaddr64x: [Base64 + Xmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Xmm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Xmm_vsib*ss; export tmp; }
vaddr64y: [Base64 + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base64 & ss { }
vaddr64y: [Ymm_vsib*ss + simm32_64] is mod=0 & r_m=4; Ymm_vsib & base64=5 & ss; simm32_64 { }
vaddr64y: [Base64 + Ymm_vsib*ss + simm8_64] is mod=1 & r_m=4; Ymm_vsib & Base64 & ss; simm8_64 { }
vaddr64y: [Base64 + Ymm_vsib*ss + simm32_64] is mod=2 & r_m=4; Ymm_vsib & Base64 & ss; simm32_64 { }
vaddr64y: [Base64 + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base64 & ss { local tmp=zext(Base64)+Ymm_vsib*ss; export tmp; }
vaddr64y: [Ymm_vsib*ss + simm32_64] is mod=0 & r_m=4; Ymm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Ymm_vsib*ss; export tmp; }
vaddr64y: [Base64 + Ymm_vsib*ss + simm8_64] is mod=1 & r_m=4; Ymm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Ymm_vsib*ss; export tmp; }
vaddr64y: [Base64 + Ymm_vsib*ss + simm32_64] is mod=2 & r_m=4; Ymm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Ymm_vsib*ss; export tmp; }
vaddr64z: [Base64 + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base64 & ss { }
vaddr64z: [Zmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Zmm_vsib & base64=5 & ss; simm32_64 { }
vaddr64z: [Base64 + Zmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Zmm_vsib & Base64 & ss; simm8_64 { }
vaddr64z: [Base64 + Zmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Zmm_vsib & Base64 & ss; simm32_64 { }
vaddr64z: [Base64 + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base64 & ss { local tmp=zext(Base64)+Zmm_vsib*ss; export tmp; }
vaddr64z: [Zmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Zmm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Zmm_vsib*ss; export tmp; }
vaddr64z: [Base64 + Zmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Zmm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Zmm_vsib*ss; export tmp; }
vaddr64z: [Base64 + Zmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Zmm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Zmm_vsib*ss; export tmp; }
@endif
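These vaddr subtables previously had empty bodies; they now export the computed address so the gather/scatter semantics receive a real value. Conceptually each lane's address is Base + Index[i]*scale + displacement; the SLEIGH above approximates that by widening the scalar base and displacement to the full vector width and adding them to the packed indices, as in zext(Base) + Xmm_vsib*ss.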
vMem32x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { }
vMem32x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { }
vMem32x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { export vaddr32x; }
vMem32x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { export vaddr32x; }
vMem32y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { }
vMem32y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { }
vMem32y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { export vaddr32y; }
vMem32y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { export vaddr32y; }
vMem32z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { }
vMem32z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { }
vMem32z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { export vaddr32z; }
vMem32z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { export vaddr32z; }
@ifdef IA64
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { }
vMem32x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { }
vMem32x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { export vaddr64x; }
vMem32x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { export vaddr64x; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { }
vMem32y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { }
vMem32y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { export vaddr64y; }
vMem32y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { export vaddr64y; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { }
vMem32z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { }
vMem32z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { export vaddr64z; }
vMem32z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { export vaddr64z; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem64x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { }
vMem64x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { }
vMem64x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { export vaddr32x; }
vMem64x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { export vaddr32x; }
vMem64x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { }
vMem64x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { }
vMem64x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { export vaddr64x; }
vMem64x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { export vaddr64x; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem64y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { }
vMem64y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { }
vMem64y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { export vaddr32y; }
vMem64y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { export vaddr32y; }
vMem64y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { }
vMem64y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { }
vMem64y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { export vaddr64y; }
vMem64y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { export vaddr64y; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem64z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { }
vMem64z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { }
vMem64z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { export vaddr32z; }
vMem64z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { export vaddr32z; }
vMem64z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { }
vMem64z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { }
vMem64z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { export vaddr64z; }
vMem64z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { export vaddr64z; }
@endif
@@ -1262,14 +1262,14 @@ d_vm64y: "dword ptr "^vMem64y is vMem64y { }
@endif
q_vm32x: "qword ptr "^vMem32x is vMem32x { }
q_vm32x: "qword ptr "^vMem32x is vMem32x { export vMem32x; }
# not used q_vm32y: "qword ptr "^vMem32y is vMem32y { }
# not used q_vm32z: "qword ptr "^vMem32z is vMem32z { }
@ifdef IA64
q_vm64x: "qword ptr "^vMem64x is vMem64x { }
q_vm64y: "qword ptr "^vMem64y is vMem64y { }
q_vm64z: "qword ptr "^vMem64z is vMem64z { }
q_vm64x: "qword ptr "^vMem64x is vMem64x { export vMem64x; }
q_vm64y: "qword ptr "^vMem64y is vMem64y { export vMem64y; }
q_vm64z: "qword ptr "^vMem64z is vMem64z { export vMem64z; }
@endif
x_vm32x: "xmmword ptr "^vMem32x is vMem32x { export vMem32x; }
@@ -1277,9 +1277,9 @@ y_vm32y: "ymmword ptr "^vMem32y is vMem32y { export vMem32y; }
z_vm32z: "zmmword ptr "^vMem32z is vMem32z { export vMem32z; }
@ifdef IA64
x_vm64x: "xmmword ptr "^vMem64x is vMem64x { }
y_vm64y: "ymmword ptr "^vMem64y is vMem64y { }
z_vm64z: "zmmword ptr "^vMem64z is vMem64z { }
x_vm64x: "xmmword ptr "^vMem64x is vMem64x { export vMem64x; }
y_vm64y: "ymmword ptr "^vMem64y is vMem64y { export vMem64y; }
z_vm64z: "zmmword ptr "^vMem64z is vMem64z { export vMem64z; }
@endif
Reg32_m8: Rmr32 is mod=3 & Rmr32 { export Rmr32; }


@@ -5,7 +5,7 @@
endian="little"
size="32"
variant="default"
version="4.5"
version="4.6"
slafile="x86.sla"
processorspec="x86.pspec"
manualindexfile="../manuals/x86.idx"
@@ -37,7 +37,7 @@
endian="little"
size="32"
variant="System Management Mode"
version="4.5"
version="4.6"
slafile="x86.sla"
processorspec="x86-16.pspec"
manualindexfile="../manuals/x86.idx"
@@ -50,7 +50,7 @@
endian="little"
size="16"
variant="Real Mode"
version="4.5"
version="4.6"
slafile="x86.sla"
processorspec="x86-16-real.pspec"
manualindexfile="../manuals/x86.idx"
@@ -70,7 +70,7 @@
endian="little"
size="16"
variant="Protected Mode"
version="4.5"
version="4.6"
slafile="x86.sla"
processorspec="x86-16.pspec"
manualindexfile="../manuals/x86.idx"
@@ -85,7 +85,7 @@
endian="little"
size="64"
variant="default"
version="4.5"
version="4.6"
slafile="x86-64.sla"
processorspec="x86-64.pspec"
manualindexfile="../manuals/x86.idx"
@@ -108,7 +108,7 @@
endian="little"
size="64"
variant="compat32"
version="4.5"
version="4.6"
slafile="x86-64.sla"
processorspec="x86-64-compat32.pspec"
manualindexfile="../manuals/x86.idx"