From 4f1933c1a38c559631dcc89d4ae46a7b97c6d55c Mon Sep 17 00:00:00 2001
From: ghidorahrex
Date: Thu, 24 Apr 2025 12:57:38 -0400
Subject: [PATCH] GP-5537: Added additional AVX512 instructions

---
 .../Processors/x86/data/languages/avx512.sinc | 6242 ++++++++++++++---
 Ghidra/Processors/x86/data/languages/ia.sinc  |  141 +-
 2 files changed, 5273 insertions(+), 1110 deletions(-)

diff --git a/Ghidra/Processors/x86/data/languages/avx512.sinc b/Ghidra/Processors/x86/data/languages/avx512.sinc
index 2c88dc743f..3a3aea7e40 100644
--- a/Ghidra/Processors/x86/data/languages/avx512.sinc
+++ b/Ghidra/Processors/x86/data/languages/avx512.sinc
@@ -4,10 +4,10 @@
 # ADDPD 3-33 PAGE 603 LINE 33411
 define pcodeop vaddpd_avx512vl ;
-:VADDPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VADDPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vaddpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vaddpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
@@ -15,10 +15,10 @@
 }
 # ADDPD 3-33 PAGE 603 LINE 33414
-:VADDPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VADDPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vaddpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vaddpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
@@ -39,10 +39,10 @@ define pcodeop vaddpd_avx512f ;
 # ADDPS 3-36 PAGE 606 LINE 33562
 define pcodeop vaddps_avx512vl ;
-:VADDPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VADDPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vaddps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vaddps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
@@ -50,10 +50,10 @@
 }
 # ADDPS 3-36 PAGE 606 LINE 33565
-:VADDPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VADDPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vaddps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst);
+  YmmResult = vaddps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst);
   YmmMask = YmmReg1;
   build YmmOpMask32;
@@ -74,10 +74,10 @@ define pcodeop vaddps_avx512f ;
 # ADDSD 3-39 PAGE 609 LINE 33721
 define pcodeop vaddsd_avx512f ;
-:VADDSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
+:VADDSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vaddsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vaddsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
@@ -86,10 +86,10 @@ define pcodeop vaddsd_avx512f ;
 # ADDSS 3-41 PAGE 611 LINE 33815
 define pcodeop vaddss_avx512f ;
-:VADDSS XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m32
+:VADDSS XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vaddss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vaddss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
@@ -98,10 +98,10 @@ define pcodeop vaddss_avx512f ;
 # ANDPD 3-64 PAGE 634 LINE 34827
 define pcodeop vandpd_avx512vl ;
-:VANDPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VANDPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vandpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vandpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
@@ -109,10 +109,10 @@
 }
 # ANDPD 3-64 PAGE 634 LINE 34830
-:VANDPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VANDPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vandpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vandpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
@@ -133,10 +133,10 @@ define pcodeop vandpd_avx512dq ;
 # ANDPS 3-67 PAGE 637 LINE 34953
 define pcodeop vandps_avx512vl ;
-:VANDPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VANDPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vandps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vandps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
@@ -144,10 +144,10 @@
 }
 # ANDPS 3-67 PAGE 637 LINE 34956
-:VANDPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VANDPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  local tmp:32 = vandps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  local tmp:32 = vandps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -166,20 +166,20 @@ define pcodeop vandps_avx512dq ;
 # ANDNPD 3-70 PAGE 640 LINE 35087
 define pcodeop vandnpd_avx512vl ;
-:VANDNPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VANDNPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vandnpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vandnpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }
 # ANDNPD 3-70 PAGE 640 LINE 35090
-:VANDNPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VANDNPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vandnpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vandnpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -198,20 +198,20 @@ define pcodeop vandnpd_avx512dq ;
 # ANDNPS 3-73 PAGE 643 LINE 35213
 define pcodeop vandnps_avx512vl ;
-:VANDNPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VANDNPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vandnps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vandnps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # ANDNPS 3-73 PAGE 643 LINE 35216
-:VANDNPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VANDNPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vandnps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vandnps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -230,18 +230,18 @@ define pcodeop vandnps_avx512dq ;
 # CMPPD 3-155 PAGE 725 LINE 39246
 define pcodeop vcmppd_avx512vl ;
-:^VCMPPD_mon KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m64bcst^VCMPPD_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m64bcst; VCMPPD_mon & VCMPPD_op
+:^VCMPPD_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst^VCMPPD_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m64bcst; VCMPPD_mon & VCMPPD_op
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  local tmp = vcmppd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst, VCMPPD_op );
+  local tmp = vcmppd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, VCMPPD_op );
   KReg_reg = zext(AVXOpMask[0,2]) & tmp;
 }
 # CMPPD 3-155 PAGE 725 LINE 39250
-:^VCMPPD_mon KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m64bcst^VCMPPD_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m64bcst; VCMPPD_mon & VCMPPD_op
+:^VCMPPD_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst^VCMPPD_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m64bcst; VCMPPD_mon & VCMPPD_op
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  local tmp = vcmppd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst, VCMPPD_op );
+  local tmp = vcmppd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, VCMPPD_op );
   KReg_reg = zext(AVXOpMask[0,4]) & tmp;
 }
@@ -256,18 +256,18 @@ define pcodeop vcmppd_avx512f ;
 # CMPPS 3-162 PAGE 732 LINE 39613
 define pcodeop vcmpps_avx512vl ;
-:^VCMPPS_mon KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m32bcst^VCMPPS_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m32bcst; VCMPPS_mon & VCMPPS_op
+:^VCMPPS_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst^VCMPPS_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m32bcst; VCMPPS_mon & VCMPPS_op
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  local tmp = vcmpps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst, VCMPPS_op );
+  local tmp = vcmpps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, VCMPPS_op );
   KReg_reg = zext(AVXOpMask[0,8]) & tmp;
 }
 # CMPPS 3-162 PAGE 732 LINE 39617
-:^VCMPPS_mon KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m32bcst^VCMPPS_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m32bcst; VCMPPS_mon & VCMPPS_op
+:^VCMPPS_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst^VCMPPS_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m32bcst; VCMPPS_mon & VCMPPS_op
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  local tmp = vcmpps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst, VCMPPS_op );
+  local tmp = vcmpps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, VCMPPS_op );
   KReg_reg = zext(AVXOpMask[0,8]) & tmp;
 }
@@ -282,19 +282,19 @@ define pcodeop vcmpps_avx512f ;
 # CMPSD 3-173 PAGE 743 LINE 40157
 define pcodeop vcmpsd_avx512f ;
-:^VCMPSD_mon KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m64^VCMPSD_op is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m64; VCMPSD_mon & VCMPSD_op
+:^VCMPSD_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m64^VCMPSD_op is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m64; VCMPSD_mon & VCMPSD_op
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp = vcmpsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64, VCMPSD_op );
+  local tmp = vcmpsd_avx512f( evexV5_XmmReg, XmmReg2_m64, VCMPSD_op );
   KReg_reg = zext(AVXOpMask[0,1]) & tmp;
 }
 # CMPSS 3-177 PAGE 747 LINE 40393
 define pcodeop vcmpss_avx512f ;
-:^VCMPSS_mon KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m32^VCMPSS_op is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m32; VCMPSS_mon & VCMPSS_op
+:^VCMPSS_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m32^VCMPSS_op is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m32; VCMPSS_mon & VCMPSS_op
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp = vcmpss_avx512f( vexVVVV_XmmReg, XmmReg2_m32, VCMPSS_op );
+  local tmp = vcmpss_avx512f( evexV5_XmmReg, XmmReg2_m32, VCMPSS_op );
   KReg_reg = zext(AVXOpMask[0,1]) & tmp;
 }
@@ -528,10 +528,10 @@ define pcodeop vcvtsd2si_avx512f ;
 # CVTSD2SS 3-255 PAGE 825 LINE 44417
 define pcodeop vcvtsd2ss_avx512f ;
-:VCVTSD2SS XmmReg1^XmmOpMask, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & XmmReg2_m64
+:VCVTSD2SS XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vcvtsd2ss_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vcvtsd2ss_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask;
   XmmResult[0,32] = (zext(XmmOpMask[0,1]) * XmmResult[0,32]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]);
@@ -540,48 +540,48 @@ define pcodeop vcvtsd2ss_avx512f ;
 # CVTSI2SD 3-257 PAGE 827 LINE 44522
 define pcodeop vcvtsi2sd_avx512f ;
-:VCVTSI2SD XmmReg1, vexVVVV_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32
+:VCVTSI2SD XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtsi2sd_avx512f( vexVVVV_XmmReg, rm32 );
+  local tmp:16 = vcvtsi2sd_avx512f( evexV5_XmmReg, rm32 );
   ZmmReg1 = zext(tmp);
 }
 # CVTSI2SD 3-257 PAGE 827 LINE 44525
 @ifdef IA64
-:VCVTSI2SD XmmReg1, vexVVVV_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64
+:VCVTSI2SD XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtsi2sd_avx512f( vexVVVV_XmmReg, rm64 );
+  local tmp:16 = vcvtsi2sd_avx512f( evexV5_XmmReg, rm64 );
   ZmmReg1 = zext(tmp);
 }
 @endif
 # CVTSI2SS 3-259 PAGE 829 LINE 44636
 define pcodeop vcvtsi2ss_avx512f ;
-:VCVTSI2SS XmmReg1, vexVVVV_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32
+:VCVTSI2SS XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtsi2ss_avx512f( vexVVVV_XmmReg, rm32 );
+  local tmp:16 = vcvtsi2ss_avx512f( evexV5_XmmReg, rm32 );
   ZmmReg1 = zext(tmp);
 }
 # CVTSI2SS 3-259 PAGE 829 LINE 44638
 @ifdef IA64
-:VCVTSI2SS XmmReg1, vexVVVV_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64
+:VCVTSI2SS XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtsi2ss_avx512f( vexVVVV_XmmReg, rm64 );
+  local tmp:16 = vcvtsi2ss_avx512f( evexV5_XmmReg, rm64 );
   ZmmReg1 = zext(tmp);
 }
 @endif
 # CVTSS2SD 3-261 PAGE 831 LINE 44747
 define pcodeop vcvtss2sd_avx512f ;
-:VCVTSS2SD XmmReg1^XmmOpMask, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & XmmReg2_m32
+:VCVTSS2SD XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vcvtss2sd_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vcvtss2sd_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask;
   XmmResult[0,64] = (zext(XmmOpMask[0,1]) * XmmResult[0,64]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]);
@@ -708,20 +708,20 @@ define pcodeop vcvttss2si_avx512f ;
 # DIVPD 3-288 PAGE 858 LINE 46029
 define pcodeop vdivpd_avx512vl ;
-:VDIVPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VDIVPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vdivpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vdivpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }
 # DIVPD 3-288 PAGE 858 LINE 46033
-:VDIVPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VDIVPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vdivpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vdivpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -740,20 +740,20 @@ define pcodeop vdivpd_avx512f ;
 # DIVPS 3-291 PAGE 861 LINE 46170
 define pcodeop vdivps_avx512vl ;
-:VDIVPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VDIVPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vdivps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vdivps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # DIVPS 3-291 PAGE 861 LINE 46174
-:VDIVPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VDIVPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vdivps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vdivps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -772,10 +772,10 @@ define pcodeop vdivps_avx512f ;
 # DIVSD 3-294 PAGE 864 LINE 46315
 define pcodeop vdivsd_avx512f ;
-:VDIVSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
+:VDIVSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vdivsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vdivsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
@@ -783,10 +783,10 @@ define pcodeop vdivsd_avx512f ;
 # DIVSS 3-296 PAGE 866 LINE 46413
 define pcodeop vdivss_avx512f ;
-:VDIVSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
+:VDIVSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vdivss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vdivss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -802,30 +802,30 @@ define pcodeop vextractps_avx512f ;
 # INSERTPS 3-454 PAGE 1024 LINE 53785
 define pcodeop vinsertps_avx512f ;
-:VINSERTPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x21; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32; imm8
+:VINSERTPS XmmReg1, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x21; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32; imm8
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vinsertps_avx512f( vexVVVV_XmmReg, XmmReg2_m32, imm8:1 );
+  local tmp:16 = vinsertps_avx512f( evexV5_XmmReg, XmmReg2_m32, imm8:1 );
   ZmmReg1 = zext(tmp);
 }
 # MAXPD 4-12 PAGE 1132 LINE 59206
 define pcodeop vmaxpd_avx512vl ;
-:VMAXPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VMAXPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vmaxpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vmaxpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }
 # MAXPD 4-12 PAGE 1132 LINE 59210
-:VMAXPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VMAXPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vmaxpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vmaxpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -844,20 +844,20 @@ define pcodeop vmaxpd_avx512f ;
 # MAXPS 4-15 PAGE 1135 LINE 59356
 define pcodeop vmaxps_avx512vl ;
-:VMAXPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VMAXPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vmaxps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vmaxps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # MAXPS 4-15 PAGE 1135 LINE 59359
-:VMAXPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VMAXPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vmaxps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vmaxps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -876,10 +876,10 @@ define pcodeop vmaxps_avx512f ;
 # MAXSD 4-18 PAGE 1138 LINE 59506
 define pcodeop vmaxsd_avx512f ;
-:VMAXSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
+:VMAXSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vmaxsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vmaxsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
@@ -887,10 +887,10 @@ define pcodeop vmaxsd_avx512f ;
 # MAXSS 4-20 PAGE 1140 LINE 59609
 define pcodeop vmaxss_avx512f ;
-:VMAXSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
+:VMAXSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vmaxss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vmaxss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -898,20 +898,20 @@ define pcodeop vmaxss_avx512f ;
 # MINPD 4-23 PAGE 1143 LINE 59771
 define pcodeop vminpd_avx512vl ;
-:VMINPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VMINPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vminpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vminpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }
 # MINPD 4-23 PAGE 1143 LINE 59774
-:VMINPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VMINPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vminpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vminpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -930,20 +930,20 @@ define pcodeop vminpd_avx512f ;
 # MINPS 4-26 PAGE 1146 LINE 59915
 define pcodeop vminps_avx512vl ;
-:VMINPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VMINPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vminps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vminps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # MINPS 4-26 PAGE 1146 LINE 59918
-:VMINPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VMINPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vminps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vminps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -962,10 +962,10 @@ define pcodeop vminps_avx512f ;
 # MINSD 4-29 PAGE 1149 LINE 60063
 define pcodeop vminsd_avx512f ;
-:VMINSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
+:VMINSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vminsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vminsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
@@ -973,10 +973,10 @@ define pcodeop vminsd_avx512f ;
 # MINSS 4-31 PAGE 1151 LINE 60166
 define pcodeop vminss_avx512f ;
-:VMINSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
+:VMINSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vminss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vminss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -1540,9 +1540,9 @@ define pcodeop vmovdqu64_avx512f ;
 }
 # MOVHLPS 4-76 PAGE 1196 LINE 62412
-:VMOVHLPS XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2)
+:VMOVHLPS XmmReg1, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2)
 {
-  local src1 = vexVVVV_XmmReg[64,64];
+  local src1 = evexV5_XmmReg[64,64];
   local src2 = XmmReg2[64,64];
   XmmReg1[0,64] = src2;
   XmmReg1[64,64] = src2;
@@ -1551,10 +1551,10 @@ define pcodeop vmovdqu64_avx512f ;
 # MOVHPD 4-78 PAGE 1198 LINE 62485
 define pcodeop vmovhpd_avx512f ;
-:VMOVHPD XmmReg1, vexVVVV_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ... & m64
+:VMOVHPD XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ... & m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local src1 = vexVVVV_XmmReg[0,64];
+  local src1 = evexV5_XmmReg[0,64];
   local src2 = m64[0,64];
   XmmReg1[0,64] = src2;
   XmmReg1[64,64] = src2;
@@ -1570,10 +1570,10 @@ define pcodeop vmovhpd_avx512f ;
 # MOVHPS 4-80 PAGE 1200 LINE 62572
 define pcodeop vmovhps_avx512f ;
-:VMOVHPS XmmReg1, vexVVVV_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ... & m64
+:VMOVHPS XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ... & m64
 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2)
 {
-  XmmResult = vmovhps_avx512f( vexVVVV_XmmReg, m64 );
+  XmmResult = vmovhps_avx512f( evexV5_XmmReg, m64 );
   XmmMask = XmmReg1;
   ZmmReg1 = zext(XmmResult);
 }
@@ -1588,18 +1588,18 @@ define pcodeop vmovhps_avx512f ;
 # MOVLHPS 4-82 PAGE 1202 LINE 62660
 # WARNING: duplicate opcode EVEX.NDS.128.0F.W0 16 /r last seen on 4-80 PAGE 1200 LINE 62572 for "VMOVLHPS xmm1, xmm2, xmm3"
 define pcodeop vmovlhps_avx512f ;
-:VMOVLHPS XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2)
+:VMOVLHPS XmmReg1, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2)
 {
-  local tmp:16 = vmovlhps_avx512f( vexVVVV_XmmReg, XmmReg2 );
+  local tmp:16 = vmovlhps_avx512f( evexV5_XmmReg, XmmReg2 );
   ZmmReg1 = zext(tmp);
 }
 # MOVLPD 4-84 PAGE 1204 LINE 62733
 define pcodeop vmovlpd_avx512f ;
-:VMOVLPD XmmReg1, vexVVVV_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ... & m64
+:VMOVLPD XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ... & m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vmovlpd_avx512f( vexVVVV_XmmReg, m64 );
+  local tmp:16 = vmovlpd_avx512f( evexV5_XmmReg, m64 );
   ZmmReg1 = zext(tmp);
 }
@@ -1613,10 +1613,10 @@ define pcodeop vmovlpd_avx512f ;
 # MOVLPS 4-86 PAGE 1206 LINE 62818
 # WARNING: duplicate opcode EVEX.NDS.128.0F.W0 12 /r last seen on 4-76 PAGE 1196 LINE 62412 for "VMOVLPS xmm2, xmm1, m64"
 define pcodeop vmovlps_avx512f ;
-:VMOVLPS XmmReg1, vexVVVV_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ... & m64
+:VMOVLPS XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ... & m64
 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2)
 {
-  local tmp:16 = vmovlps_avx512f( vexVVVV_XmmReg, m64 );
+  local tmp:16 = vmovlps_avx512f( evexV5_XmmReg, m64 );
   ZmmReg1 = zext(tmp);
 }
@@ -1744,13 +1744,13 @@ define pcodeop vmovntps_avx512f ;
 # MOVSD 4-111 PAGE 1231 LINE 63978
 define pcodeop vmovsd_avx512f ;
-:VMOVSD XmmReg1^XmmOpMask, vexVVVV_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) & (mod=0x3 & XmmReg2)
+:VMOVSD XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) & (mod=0x3 & XmmReg2)
 {
   XmmResult = XmmReg2;
   XmmMask = XmmReg1;
   build XmmOpMask;
   XmmResult[0,64] = (zext(XmmOpMask[0,1]) * XmmResult[0,64]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]);
-  XmmResult[64,64] = vexVVVV_XmmReg[64,64];
+  XmmResult[64,64] = evexV5_XmmReg[64,64];
   ZmmReg1 = zext(XmmResult);
 }
@@ -1766,13 +1766,13 @@ define pcodeop vmovsd_avx512f ;
 }
 # MOVSD 4-111 PAGE 1231 LINE 63983
-:VMOVSD XmmReg2^XmmOpMask, vexVVVV_XmmReg, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x11; XmmReg1 & ZmmReg1 & XmmOpMask & (mod=0x3 & XmmReg2)
+:VMOVSD XmmReg2^XmmOpMask, evexV5_XmmReg, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x11; XmmReg1 & ZmmReg1 & XmmOpMask & (mod=0x3 & XmmReg2)
 {
   XmmResult = XmmReg1;
   XmmMask = XmmReg2;
   build XmmOpMask;
   XmmResult[0,64] = (zext(XmmOpMask[0,1]) * XmmResult[0,64]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]);
-  XmmResult[64,64] = vexVVVV_XmmReg[64,64];
+  XmmResult[64,64] = evexV5_XmmReg[64,64];
   ZmmReg1 = zext(XmmResult);
 }
@@ -1854,12 +1854,12 @@ define pcodeop vmovsldup_avx512f ;
 }
 # MOVSS 4-120 PAGE 1240 LINE 64443
-:VMOVSS XmmReg1^XmmOpMask, vexVVVV_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) & (mod=0x3 & XmmReg2)
+:VMOVSS XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) & (mod=0x3 & XmmReg2)
 {
   local tmp:4 = XmmReg2[0,32];
   XmmMask = XmmReg1;
   build XmmOpMask;
-  XmmResult = vexVVVV_XmmReg;
+  XmmResult = evexV5_XmmReg;
   XmmResult[0,32] = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]);
   ZmmReg1 = zext(XmmResult);
 }
@@ -1876,12 +1876,12 @@ define pcodeop vmovsldup_avx512f ;
 }
 # MOVSS 4-120 PAGE 1240 LINE 64448
-:VMOVSS XmmReg2^XmmOpMask, vexVVVV_XmmReg, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & XmmOpMask & vexVVVV_XmmReg; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
+:VMOVSS XmmReg2^XmmOpMask, evexV5_XmmReg, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
 {
   local tmp:4 = XmmReg1[0,32];
   XmmMask = XmmReg2;
   build XmmOpMask;
-  XmmResult = vexVVVV_XmmReg;
+  XmmResult = evexV5_XmmReg;
   XmmResult[0,32] = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]);
   ZmmReg2 = zext(XmmResult);
 }
@@ -2054,20 +2054,20 @@ define pcodeop vmovsldup_avx512f ;
 # MULPD 4-146 PAGE 1266 LINE 65686
 define pcodeop vmulpd_avx512vl ;
-:VMULPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VMULPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vmulpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vmulpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }
 # MULPD 4-146 PAGE 1266 LINE 65689
-:VMULPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VMULPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vmulpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vmulpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -2086,20 +2086,20 @@ define pcodeop vmulpd_avx512f ;
 # MULPS 4-149 PAGE 1269 LINE 65817
 define pcodeop vmulps_avx512vl ;
-:VMULPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VMULPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vmulps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vmulps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # MULPS 4-149 PAGE 1269 LINE 65820
-:VMULPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VMULPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vmulps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vmulps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -2118,10 +2118,10 @@ define pcodeop vmulps_avx512f ;
 # MULSD 4-152 PAGE 1272 LINE 65959
 define pcodeop vmulsd_avx512f ;
-:VMULSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
+:VMULSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vmulsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vmulsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
@@ -2129,10 +2129,10 @@ define pcodeop vmulsd_avx512f ;
 # MULSS 4-154 PAGE 1274 LINE 66055
 define pcodeop vmulss_avx512f ;
-:VMULSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
+:VMULSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vmulss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vmulss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -2140,20 +2140,20 @@ define pcodeop vmulss_avx512f ;
 # ORPD 4-168 PAGE 1288 LINE 66724
 define pcodeop vorpd_avx512vl ;
-:VORPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VORPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vorpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vorpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }
 # ORPD 4-168 PAGE 1288 LINE 66727
-:VORPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VORPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vorpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vorpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -2172,20 +2172,20 @@ define pcodeop vorpd_avx512dq ;
 # ORPS 4-171 PAGE 1291 LINE 66850
 define pcodeop vorps_avx512vl ;
-:VORPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VORPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vorps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vorps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # ORPS 4-171 PAGE 1291 LINE 66853
-:VORPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VORPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vorps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vorps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -2332,20 +2332,20 @@ define pcodeop vpabsq_avx512f ;
 # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67645
 define pcodeop vpacksswb_avx512vl ;
-:VPACKSSWB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x63; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPACKSSWB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x63; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpacksswb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpacksswb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
 }
 # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67649
-:VPACKSSWB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x63; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
+:VPACKSSWB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x63; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpacksswb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpacksswb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask8;
   ZmmReg1 = zext(YmmResult);
@@ -2364,20 +2364,20 @@ define pcodeop vpacksswb_avx512bw ;
 # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67657
 define pcodeop vpackssdw_avx512vl ;
-:VPACKSSDW XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x6B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VPACKSSDW XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x6B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpackssdw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vpackssdw_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # PACKSSWB/PACKSSDW 4-187 PAGE 1307 LINE 67674
-:VPACKSSDW YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x6B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VPACKSSDW YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x6B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpackssdw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vpackssdw_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -2396,20 +2396,20 @@ define pcodeop vpackssdw_avx512bw ;
 # PACKUSDW 4-194 PAGE 1314 LINE 68094
 define pcodeop vpackusdw_avx512vl ;
-:VPACKUSDW XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VPACKUSDW XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x2B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpackusdw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vpackusdw_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }
 # PACKUSDW 4-194 PAGE 1314 LINE 68098
-:VPACKUSDW YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x2B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VPACKUSDW YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x2B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpackusdw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vpackusdw_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -2428,20 +2428,20 @@ define pcodeop vpackusdw_avx512bw ;
 # PACKUSWB 4-199 PAGE 1319 LINE 68374
 define pcodeop vpackuswb_avx512vl ;
-:VPACKUSWB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x67; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPACKUSWB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x67; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpackuswb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpackuswb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
 }
 # PACKUSWB 4-199 PAGE 1319 LINE 68378
-:VPACKUSWB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x67; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
+:VPACKUSWB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x67; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpackuswb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpackuswb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask8;
   ZmmReg1 = zext(YmmResult);
@@ -2460,10 +2460,10 @@ define pcodeop vpackuswb_avx512bw ;
 # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68674
 define pcodeop vpaddb_avx512vl ;
-:VPADDB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xFC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPADDB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xFC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpaddb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpaddb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
@@ -2471,10 +2471,10 @@ define pcodeop vpaddb_avx512vl ;
 # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68677
 define pcodeop vpaddw_avx512vl ;
-:VPADDW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xFD; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
+:VPADDW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xFD; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpaddw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpaddw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask16;
   ZmmReg1 = zext(XmmResult);
@@ -2482,10 +2482,10 @@ define pcodeop vpaddw_avx512vl ;
 # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68680
 define pcodeop vpaddd_avx512vl ;
-:VPADDD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xFE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VPADDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xFE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpaddd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vpaddd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -2493,50 +2493,50 @@ define pcodeop vpaddd_avx512vl ;
 # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68683
 define pcodeop vpaddq_avx512vl ;
-:VPADDQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xD4; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VPADDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xD4; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpaddq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vpaddq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }
 # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68686
-:VPADDB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xFC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
+:VPADDB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xFC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpaddb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpaddb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask8;
   ZmmReg1 = zext(YmmResult);
 }
 # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68689
-:VPADDW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xFD; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
+:VPADDW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xFD; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpaddw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpaddw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask16;
   ZmmReg1 = zext(YmmResult);
 }
 # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68692
-:VPADDD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xFE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VPADDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xFE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpaddd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vpaddd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
 }
 # PADDB/PADDW/PADDD/PADDQ 4-205 PAGE 1325 LINE 68707
-:VPADDQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xD4; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VPADDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xD4; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpaddq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vpaddq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -2588,20 +2588,20 @@ define pcodeop vpaddq_avx512f ;
 # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69051
 define pcodeop vpaddsb_avx512vl ;
-:VPADDSB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xEC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPADDSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xEC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpaddsb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpaddsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
 }
 # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69054
-:VPADDSB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xEC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ...
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpaddsb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpaddsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -2620,20 +2620,20 @@ define pcodeop vpaddsb_avx512bw ; # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69060 define pcodeop vpaddsw_avx512vl ; -:VPADDSW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xED; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPADDSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xED; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpaddsw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpaddsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69063 -:VPADDSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xED; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPADDSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xED; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpaddsw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpaddsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -2652,20 +2652,20 @@ define pcodeop vpaddsw_avx512bw ; # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69269 define pcodeop vpaddusb_avx512vl ; -:VPADDUSB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xDC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPADDUSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpaddusb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpaddusb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69273 -:VPADDUSB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xDC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPADDUSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpaddusb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpaddusb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -2684,20 +2684,20 @@ define pcodeop vpaddusb_avx512bw ; # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69281 define pcodeop vpaddusw_avx512vl ; -:VPADDUSW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xDD; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPADDUSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDD; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpaddusw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpaddusw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69285 -:VPADDUSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xDD; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPADDUSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDD; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpaddusw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpaddusw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -2716,20 +2716,20 @@ define pcodeop vpaddusw_avx512bw ; # PALIGNR 4-219 PAGE 1339 LINE 69495 define pcodeop vpalignr_avx512vl ; -:VPALIGNR XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0F; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPALIGNR XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_XmmReg; byte=0x0F; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpalignr_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpalignr_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PALIGNR 4-219 PAGE 1339 LINE 69499 -:VPALIGNR YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0F; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPALIGNR YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_YmmReg; byte=0x0F; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpalignr_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpalignr_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -2748,20 +2748,20 @@ define pcodeop vpalignr_avx512bw ; # PAND 4-223 PAGE 1343 LINE 69684 define pcodeop vpandd_avx512vl ; -:VPANDD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xDB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPANDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xDB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpandd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpandd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PAND 4-223 PAGE 1343 LINE 69687 -:VPANDD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xDB; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPANDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xDB; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpandd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpandd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -2780,20 +2780,20 @@ define pcodeop vpandd_avx512f ; # PAND 4-223 PAGE 1343 LINE 69693 define pcodeop vpandq_avx512vl ; -:VPANDQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xDB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPANDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xDB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpandq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpandq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PAND 4-223 PAGE 1343 LINE 69696 -:VPANDQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xDB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPANDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xDB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpandq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpandq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -2812,20 +2812,20 @@ define pcodeop vpandq_avx512f ; # PANDN 4-226 PAGE 1346 LINE 69859 define pcodeop vpandnd_avx512vl ; -:VPANDND XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPANDND XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpandnd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpandnd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PANDN 4-226 PAGE 1346 LINE 69862 -:VPANDND YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPANDND YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpandnd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpandnd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -2844,20 +2844,20 @@ define pcodeop vpandnd_avx512f ; # PANDN 4-226 PAGE 1346 LINE 69868 define pcodeop vpandnq_avx512vl ; -:VPANDNQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPANDNQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpandnq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpandnq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PANDN 4-226 PAGE 1346 LINE 69871 -:VPANDNQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPANDNQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpandnq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpandnq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -2876,20 +2876,20 @@ define pcodeop vpandnq_avx512f ; # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70097 define pcodeop vpavgb_avx512vl ; -:VPAVGB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE0; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPAVGB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE0; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpavgb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpavgb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70100 -:VPAVGB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE0; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPAVGB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE0; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpavgb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpavgb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -2908,20 +2908,20 @@ define pcodeop vpavgb_avx512bw ; # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70106 define pcodeop vpavgw_avx512vl ; -:VPAVGW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE3; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPAVGW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE3; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpavgw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpavgw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70109 -:VPAVGW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE3; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPAVGW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE3; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpavgw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpavgw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -2940,19 +2940,19 @@ define pcodeop vpavgw_avx512bw ; # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70841 define pcodeop vpcmpeqd_avx512vl ; -:VPCMPEQD KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x76; KReg_reg ... & XmmReg2_m128_m32bcst +:VPCMPEQD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x76; KReg_reg ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpeqd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + local tmp = vpcmpeqd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70846 -:VPCMPEQD KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x76; KReg_reg ... & YmmReg2_m256_m32bcst +:VPCMPEQD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x76; KReg_reg ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpeqd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + local tmp = vpcmpeqd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } @@ -2967,18 +2967,18 @@ define pcodeop vpcmpeqd_avx512f ; # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70855 define pcodeop vpcmpeqb_avx512vl ; -:VPCMPEQB KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_XmmReg; byte=0x74; KReg_reg ... & XmmReg2_m128 +:VPCMPEQB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x74; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpeqb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpeqb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70873 -:VPCMPEQB KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_YmmReg; byte=0x74; KReg_reg ... & YmmReg2_m256 +:VPCMPEQB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x74; KReg_reg ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpeqb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpeqb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } @@ -2993,18 +2993,18 @@ define pcodeop vpcmpeqb_avx512bw ; # PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70883 define pcodeop vpcmpeqw_avx512vl ; -:VPCMPEQW KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_XmmReg; byte=0x75; KReg_reg ... & XmmReg2_m128 +:VPCMPEQW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x75; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpeqw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpeqw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70888 -:VPCMPEQW KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_YmmReg; byte=0x75; KReg_reg ... & YmmReg2_m256 +:VPCMPEQW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x75; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpeqw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpeqw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } @@ -3019,18 +3019,18 @@ define pcodeop vpcmpeqw_avx512bw ; # PCMPEQQ 4-250 PAGE 1370 LINE 71174 define pcodeop vpcmpeqq_avx512vl ; -:VPCMPEQQ KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x29; KReg_reg ... & XmmReg2_m128_m64bcst +:VPCMPEQQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x29; KReg_reg ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpeqq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + local tmp = vpcmpeqq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # PCMPEQQ 4-250 PAGE 1370 LINE 71179 -:VPCMPEQQ KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x29; KReg_reg ... & YmmReg2_m256_m64bcst +:VPCMPEQQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x29; KReg_reg ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpeqq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + local tmp = vpcmpeqq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } @@ -3045,18 +3045,18 @@ define pcodeop vpcmpeqq_avx512f ; # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71517 define pcodeop vpcmpgtd_avx512vl ; -:VPCMPGTD KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x66; KReg_reg ... 
& XmmReg2_m128_m32bcst +:VPCMPGTD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x66; KReg_reg ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpgtd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + local tmp = vpcmpgtd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71521 -:VPCMPGTD KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x66; KReg_reg ... & YmmReg2_m256_m32bcst +:VPCMPGTD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x66; KReg_reg ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpgtd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + local tmp = vpcmpgtd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } @@ -3071,18 +3071,18 @@ define pcodeop vpcmpgtd_avx512f ; # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71529 define pcodeop vpcmpgtb_avx512vl ; -:VPCMPGTB KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_XmmReg; byte=0x64; KReg_reg ... & XmmReg2_m128 +:VPCMPGTB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x64; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpgtb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpgtb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71533 -:VPCMPGTB KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_YmmReg; byte=0x64; KReg_reg ... & YmmReg2_m256 +:VPCMPGTB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x64; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpgtb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpgtb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } @@ -3097,18 +3097,18 @@ define pcodeop vpcmpgtb_avx512bw ; # PCMPGTB/PCMPGTW/PCMPGTD 4-258 PAGE 1378 LINE 71549 define pcodeop vpcmpgtw_avx512vl ; -:VPCMPGTW KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_XmmReg; byte=0x65; KReg_reg ... & XmmReg2_m128 +:VPCMPGTW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x65; KReg_reg ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpgtw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpgtw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # PCMPGTB/PCMPGTW/PCMPGTD 4-258 PAGE 1378 LINE 71553 -:VPCMPGTW KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & vexVVVV_YmmReg; byte=0x65; KReg_reg ... & YmmReg2_m256 +:VPCMPGTW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x65; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpgtw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpgtw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } @@ -3123,18 +3123,18 @@ define pcodeop vpcmpgtw_avx512bw ; # PCMPGTQ 4-263 PAGE 1383 LINE 71837 define pcodeop vpcmpgtq_avx512vl ; -:VPCMPGTQ KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x37; KReg_reg ... & XmmReg2_m128_m64bcst +:VPCMPGTQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x37; KReg_reg ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpgtq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + local tmp = vpcmpgtq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # PCMPGTQ 4-263 PAGE 1383 LINE 71841 -:VPCMPGTQ KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x37; KReg_reg ... & YmmReg2_m256_m64bcst +:VPCMPGTQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x37; KReg_reg ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpgtq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + local tmp = vpcmpgtq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } @@ -3223,58 +3223,58 @@ define pcodeop vpcmpgtq_avx512f ; # PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73330 define pcodeop vpinsrb_avx512bw ; -:VPINSRB XmmReg1, vexVVVV_XmmReg, Reg32_m8, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x20; (XmmReg1 & ZmmReg1) ... & Reg32_m8; imm8 +:VPINSRB XmmReg1, evexV5_XmmReg, Reg32_m8, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_XmmReg; byte=0x20; (XmmReg1 & ZmmReg1) ... & Reg32_m8; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI) { - local tmp:16 = vpinsrb_avx512bw( vexVVVV_XmmReg, Reg32_m8, imm8:1 ); + local tmp:16 = vpinsrb_avx512bw( evexV5_XmmReg, Reg32_m8, imm8:1 ); ZmmReg1 = zext(tmp); } # PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73333 define pcodeop vpinsrd_avx512dq ; -:VPINSRD XmmReg1, vexVVVV_XmmReg, rm32, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ... 
& rm32; imm8 +:VPINSRD XmmReg1, evexV5_XmmReg, rm32, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ... & rm32; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI) { - local tmp:16 = vpinsrd_avx512dq( vexVVVV_XmmReg, rm32, imm8:1 ); + local tmp:16 = vpinsrd_avx512dq( evexV5_XmmReg, rm32, imm8:1 ); ZmmReg1 = zext(tmp); } # PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73336 define pcodeop vpinsrq_avx512dq ; @ifdef IA64 -:VPINSRQ XmmReg1, vexVVVV_XmmReg, rm64, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ... & rm64; imm8 +:VPINSRQ XmmReg1, evexV5_XmmReg, rm64, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ... & rm64; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI) { - local tmp:16 = vpinsrq_avx512dq( vexVVVV_XmmReg, rm64, imm8:1 ); + local tmp:16 = vpinsrq_avx512dq( evexV5_XmmReg, rm64, imm8:1 ); ZmmReg1 = zext(tmp); } @endif # PINSRW 4-296 PAGE 1416 LINE 73449 define pcodeop vpinsrw_avx512bw ; -:VPINSRW XmmReg1, vexVVVV_XmmReg, Reg32_m16, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xC4; (XmmReg1 & ZmmReg1) ... & Reg32_m16; imm8 +:VPINSRW XmmReg1, evexV5_XmmReg, Reg32_m16, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xC4; (XmmReg1 & ZmmReg1) ... & Reg32_m16; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI) { - local tmp:16 = vpinsrw_avx512bw( vexVVVV_XmmReg, Reg32_m16, imm8:1 ); + local tmp:16 = vpinsrw_avx512bw( evexV5_XmmReg, Reg32_m16, imm8:1 ); ZmmReg1 = zext(tmp); } # PMADDUBSW 4-298 PAGE 1418 LINE 73558 define pcodeop vpmaddubsw_avx512vl ; -:VPMADDUBSW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x04; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMADDUBSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x04; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmaddubsw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmaddubsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMADDUBSW 4-298 PAGE 1418 LINE 73562 -:VPMADDUBSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x04; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMADDUBSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x04; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmaddubsw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmaddubsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -3293,20 +3293,20 @@ define pcodeop vpmaddubsw_avx512bw ; # PMADDWD 4-301 PAGE 1421 LINE 73708 define pcodeop vpmaddwd_avx512vl ; -:VPMADDWD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF5; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 +:VPMADDWD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF5; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmaddwd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmaddwd_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMADDWD 4-301 PAGE 1421 LINE 73712 -:VPMADDWD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF5; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256 +:VPMADDWD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF5; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmaddwd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmaddwd_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -3325,20 +3325,20 @@ define pcodeop vpmaddwd_avx512bw ; # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73900 define pcodeop vpmaxsb_avx512vl ; -:VPMAXSB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x3C; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPMAXSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x3C; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmaxsb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmaxsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73903 -:VPMAXSB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x3C; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPMAXSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x3C; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmaxsb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmaxsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -3357,20 +3357,20 @@ define pcodeop vpmaxsb_avx512bw ; # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73909 define pcodeop vpmaxsw_avx512vl ; -:VPMAXSW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xEE; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMAXSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xEE; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmaxsw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmaxsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73912 -:VPMAXSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xEE; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMAXSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xEE; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmaxsw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmaxsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -3389,20 +3389,20 @@ define pcodeop vpmaxsw_avx512bw ; # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73918 define pcodeop vpmaxsd_avx512vl ; -:VPMAXSD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x3D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPMAXSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x3D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmaxsd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpmaxsd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73933 -:VPMAXSD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x3D; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPMAXSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x3D; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmaxsd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpmaxsd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -3421,20 +3421,20 @@ define pcodeop vpmaxsd_avx512f ; # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73939 define pcodeop vpmaxsq_avx512vl ; -:VPMAXSQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x3D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPMAXSQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x3D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmaxsq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpmaxsq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73942 -:VPMAXSQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x3D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPMAXSQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x3D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmaxsq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpmaxsq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -3453,20 +3453,20 @@ define pcodeop vpmaxsq_avx512f ; # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74295 define pcodeop vpmaxub_avx512vl ; -:VPMAXUB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xDE; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPMAXUB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDE; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmaxub_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmaxub_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74298 -:VPMAXUB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xDE; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPMAXUB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDE; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmaxub_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmaxub_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -3485,20 +3485,20 @@ define pcodeop vpmaxub_avx512bw ; # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74304 define pcodeop vpmaxuw_avx512vl ; -:VPMAXUW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x3E; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMAXUW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x3E; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmaxuw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmaxuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74307 -:VPMAXUW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x3E; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMAXUW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x3E; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmaxuw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmaxuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -3517,20 +3517,20 @@ define pcodeop vpmaxuw_avx512bw ; # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74540 define pcodeop vpmaxud_avx512vl ; -:VPMAXUD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x3F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPMAXUD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x3F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmaxud_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpmaxud_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74543 -:VPMAXUD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x3F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPMAXUD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x3F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmaxud_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpmaxud_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -3549,20 +3549,20 @@ define pcodeop vpmaxud_avx512f ; # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74549 define pcodeop vpmaxuq_avx512vl ; -:VPMAXUQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x3F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPMAXUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x3F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmaxuq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpmaxuq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74552 -:VPMAXUQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x3F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPMAXUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x3F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmaxuq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpmaxuq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -3581,20 +3581,20 @@ define pcodeop vpmaxuq_avx512f ; # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74748 define pcodeop vpminsb_avx512vl ; -:VPMINSB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x38; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPMINSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x38; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpminsb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpminsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74751 -:VPMINSB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPMINSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpminsb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpminsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -3613,20 +3613,20 @@ define pcodeop vpminsb_avx512bw ; # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74757 define pcodeop vpminsw_avx512vl ; -:VPMINSW XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xEA; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPMINSW XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xEA; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpminsw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpminsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74760 -:VPMINSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xEA; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMINSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xEA; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpminsw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpminsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -3645,20 +3645,20 @@ define pcodeop vpminsw_avx512bw ; # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 74995 define pcodeop vpminsd_avx512vl ; -:VPMINSD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x39; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPMINSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x39; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpminsd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpminsd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 74998 -:VPMINSD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x39; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPMINSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x39; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpminsd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpminsd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -3677,20 +3677,20 @@ define pcodeop vpminsd_avx512f ; # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 75004 define pcodeop vpminsq_avx512vl ; -:VPMINSQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x39; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPMINSQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x39; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpminsq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpminsq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 75007 -:VPMINSQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x39; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPMINSQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x39; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpminsq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpminsq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -3709,20 +3709,20 @@ define pcodeop vpminsq_avx512f ; # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75207 define pcodeop vpminub_avx512vl ; -:VPMINUB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_XmmReg; byte=0xDA; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPMINUB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & evexV5_XmmReg; byte=0xDA; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpminub_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpminub_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75210 -:VPMINUB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_YmmReg; byte=0xDA; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPMINUB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & evexV5_YmmReg; byte=0xDA; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpminub_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpminub_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -3741,20 +3741,20 @@ define pcodeop vpminub_avx512bw ; # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75216 define pcodeop vpminuw_avx512vl ; -:VPMINUW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_XmmReg; byte=0x3A; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMINUW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & evexV5_XmmReg; byte=0x3A; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpminuw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpminuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75219 -:VPMINUW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_YmmReg; byte=0x3A; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMINUW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & evexV5_YmmReg; byte=0x3A; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpminuw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpminuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -3773,20 +3773,20 @@ define pcodeop vpminuw_avx512bw ; # PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75451 define pcodeop vpminud_avx512vl ; -:VPMINUD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x3B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPMINUD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x3B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpminud_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpminud_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75454 -:VPMINUD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x3B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPMINUD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x3B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpminud_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpminud_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -3805,20 +3805,20 @@ define pcodeop vpminud_avx512f ; # PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75460 define pcodeop vpminuq_avx512vl ; -:VPMINUQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x3B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPMINUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x3B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpminuq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpminuq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75463 -:VPMINUQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x3B; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPMINUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x3B; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpminuq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpminuq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -4221,20 +4221,20 @@ define pcodeop vpmovzxdq_avx512f ; # PMULDQ 4-359 PAGE 1479 LINE 76794 define pcodeop vpmuldq_avx512vl ; -:VPMULDQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x28; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPMULDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x28; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmuldq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpmuldq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMULDQ 4-359 PAGE 1479 LINE 76798 -:VPMULDQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x28; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPMULDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x28; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmuldq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpmuldq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -4253,20 +4253,20 @@ define pcodeop vpmuldq_avx512f ; # PMULHRSW 4-362 PAGE 1482 LINE 76934 define pcodeop vpmulhrsw_avx512vl ; -:VPMULHRSW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMULHRSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmulhrsw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmulhrsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMULHRSW 4-362 PAGE 1482 LINE 76937 -:VPMULHRSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0B; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMULHRSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x0B; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmulhrsw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmulhrsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -4285,20 +4285,20 @@ define pcodeop vpmulhrsw_avx512bw ; # PMULHUW 4-366 PAGE 1486 LINE 77147 define pcodeop vpmulhuw_avx512vl ; -:VPMULHUW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE4; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMULHUW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE4; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmulhuw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmulhuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMULHUW 4-366 PAGE 1486 LINE 77151 -:VPMULHUW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE4; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMULHUW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE4; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmulhuw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmulhuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -4317,20 +4317,20 @@ define pcodeop vpmulhuw_avx512bw ; # PMULHW 4-370 PAGE 1490 LINE 77376 define pcodeop vpmulhw_avx512vl ; -:VPMULHW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE5; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMULHW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE5; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmulhw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmulhw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMULHW 4-370 PAGE 1490 LINE 77379 -:VPMULHW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE5; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMULHW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE5; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmulhw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmulhw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -4349,20 +4349,20 @@ define pcodeop vpmulhw_avx512bw ; # PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77582 define pcodeop vpmulld_avx512vl ; -:VPMULLD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPMULLD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmulld_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpmulld_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77585 -:VPMULLD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPMULLD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmulld_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpmulld_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -4381,20 +4381,20 @@ define pcodeop vpmulld_avx512f ; # PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77591 define pcodeop vpmullq_avx512vl ; -:VPMULLQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPMULLQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmullq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpmullq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77594 -:VPMULLQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPMULLQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmullq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpmullq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -4413,20 +4413,20 @@ define pcodeop vpmullq_avx512dq ; # PMULLW 4-378 PAGE 1498 LINE 77781 define pcodeop vpmullw_avx512vl ; -:VPMULLW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD5; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPMULLW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD5; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpmullw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpmullw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMULLW 4-378 PAGE 1498 LINE 77784 -:VPMULLW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD5; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPMULLW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD5; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpmullw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpmullw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -4445,20 +4445,20 @@ define pcodeop vpmullw_avx512bw ; # PMULUDQ 4-382 PAGE 1502 LINE 77977 define pcodeop vpmuludq_avx512vl ; -:VPMULUDQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xF4; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPMULUDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xF4; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpmuludq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpmuludq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMULUDQ 4-382 PAGE 1502 LINE 77981 -:VPMULUDQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xF4; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPMULUDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xF4; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpmuludq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpmuludq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -4477,20 +4477,20 @@ define pcodeop vpmuludq_avx512f ; # POR 4-399 PAGE 1519 LINE 78854 define pcodeop vpord_avx512vl ; -:VPORD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xEB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPORD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xEB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpord_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpord_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # POR 4-399 PAGE 1519 LINE 78857 -:VPORD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xEB; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPORD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xEB; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpord_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpord_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -4509,20 +4509,20 @@ define pcodeop vpord_avx512f ; # POR 4-399 PAGE 1519 LINE 78863 define pcodeop vporq_avx512vl ; -:VPORQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xEB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPORQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xEB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vporq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vporq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # POR 4-399 PAGE 1519 LINE 78866 -:VPORQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xEB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPORQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xEB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vporq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vporq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -4541,18 +4541,18 @@ define pcodeop vporq_avx512f ; # PSADBW 4-408 PAGE 1528 LINE 79250 define pcodeop vpsadbw_avx512vl ; -:VPSADBW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 +:VPSADBW XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp:16 = vpsadbw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp:16 = vpsadbw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSADBW 4-408 PAGE 1528 LINE 79255 -:VPSADBW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 +:VPSADBW YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpsadbw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpsadbw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; ZmmReg1 = zext(YmmResult); } @@ -4569,20 +4569,20 @@ define pcodeop vpsadbw_avx512bw ; # PSHUFB 4-412 PAGE 1532 LINE 79466 define pcodeop vpshufb_avx512vl ; -:VPSHUFB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x00; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... 
& XmmReg2_m128 +:VPSHUFB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x00; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpshufb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpshufb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PSHUFB 4-412 PAGE 1532 LINE 79468 -:VPSHUFB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x00; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPSHUFB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x00; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpshufb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpshufb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -4697,19 +4697,19 @@ define pcodeop vpshuflw_avx512bw ; # PSLLDQ 4-431 PAGE 1551 LINE 80491 define pcodeop vpslldq_avx512vl ; -:VPSLLDQ vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_ZmmReg); byte=0x73; reg_opcode=7 ... & XmmReg2_m128; imm8 +:VPSLLDQ evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=7 ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI) { local tmp:64 = vpslldq_avx512vl( XmmReg2_m128, imm8:1 ); - vexVVVV_ZmmReg = zext(tmp); + evexV5_ZmmReg = zext(tmp); } # PSLLDQ 4-431 PAGE 1551 LINE 80493 -:VPSLLDQ vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_YmmReg & vexVVVV_ZmmReg); byte=0x73; reg_opcode=7 ... & YmmReg2_m256; imm8 +:VPSLLDQ evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=7 ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI) { local tmp:64 = vpslldq_avx512vl( YmmReg2_m256, imm8:1 ); - vexVVVV_ZmmReg = zext(tmp); + evexV5_ZmmReg = zext(tmp); } # PSLLDQ 4-431 PAGE 1551 LINE 80495 @@ -4722,20 +4722,20 @@ define pcodeop vpslldq_avx512bw ; # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80667 define pcodeop vpsllw_avx512vl ; -:VPSLLW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPSLLW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsllw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsllw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80670 -:VPSLLW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128 +:VPSLLW YmmReg1^YmmOpMask16, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsllw_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsllw_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -4753,23 +4753,23 @@ define pcodeop vpsllw_avx512bw ; } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80676 -:VPSLLW vexVVVV_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=6 ... & XmmReg2_m128; imm8 +:VPSLLW evexV5_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=6 ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI) { XmmResult = vpsllw_avx512vl( XmmReg2_m128, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask16; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80678 -:VPSLLW vexVVVV_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=6 ... & YmmReg2_m256; imm8 +:VPSLLW evexV5_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=6 ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI) { YmmResult = vpsllw_avx512vl( YmmReg2_m256, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask16; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80680 @@ -4784,20 +4784,20 @@ define pcodeop vpsllw_avx512bw ; # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80682 define pcodeop vpslld_avx512vl ; -:VPSLLD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xF2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 +:VPSLLD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xF2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpslld_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpslld_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80685 -:VPSLLD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xF2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 +:VPSLLD YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xF2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpslld_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpslld_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -4815,23 +4815,23 @@ define pcodeop vpslld_avx512f ; } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80691 -:VPSLLD vexVVVV_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=6 ... & XmmReg2_m128_m32bcst; imm8 +:VPSLLD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=6 ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { XmmResult = vpslld_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask32; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80694 -:VPSLLD vexVVVV_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=6 ... & YmmReg2_m256_m32bcst; imm8 +:VPSLLD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=6 ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { YmmResult = vpslld_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask32; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80697 @@ -4846,20 +4846,20 @@ define pcodeop vpslld_avx512f ; # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80700 define pcodeop vpsllq_avx512vl ; -:VPSLLQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xF3; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 +:VPSLLQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xF3; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsllq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsllq_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80703 -:VPSLLQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xF3; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128 +:VPSLLQ YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xF3; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsllq_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsllq_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -4877,23 +4877,23 @@ define pcodeop vpsllq_avx512f ; } # PSLLW/PSLLD/PSLLQ 4-435 PAGE 1555 LINE 80721 -:VPSLLQ vexVVVV_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask64; byte=0x73; reg_opcode=6 ... & XmmReg2_m128_m64bcst; imm8 +:VPSLLQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x73; reg_opcode=6 ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { XmmResult = vpsllq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask64; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSLLW/PSLLD/PSLLQ 4-435 PAGE 1555 LINE 80724 -:VPSLLQ vexVVVV_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask64; byte=0x73; reg_opcode=6 ... & YmmReg2_m256_m64bcst; imm8 +:VPSLLQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x73; reg_opcode=6 ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { YmmResult = vpsllq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask64; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSLLW/PSLLD/PSLLQ 4-435 PAGE 1555 LINE 80727 @@ -4908,20 +4908,20 @@ define pcodeop vpsllq_avx512f ; # PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81329 define pcodeop vpsraw_avx512vl ; -:VPSRAW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPSRAW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsraw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsraw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81332 -:VPSRAW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128 +:VPSRAW YmmReg1^YmmOpMask16, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsraw_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsraw_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -4939,23 +4939,23 @@ define pcodeop vpsraw_avx512bw ; } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81350 -:VPSRAW vexVVVV_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=4 ... & XmmReg2_m128; imm8 +:VPSRAW evexV5_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=4 ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI) { XmmResult = vpsraw_avx512vl( XmmReg2_m128, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask16; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81352 -:VPSRAW vexVVVV_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=4 ... & YmmReg2_m256; imm8 +:VPSRAW evexV5_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=4 ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI) { YmmResult = vpsraw_avx512vl( YmmReg2_m256, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask16; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81354 @@ -4970,20 +4970,20 @@ define pcodeop vpsraw_avx512bw ; # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81356 define pcodeop vpsrad_avx512vl ; -:VPSRAD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xE2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 +:VPSRAD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xE2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsrad_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsrad_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81359 -:VPSRAD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xE2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 +:VPSRAD YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xE2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsrad_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsrad_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -5001,23 +5001,23 @@ define pcodeop vpsrad_avx512f ; } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81365 -:VPSRAD vexVVVV_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=4 ... & XmmReg2_m128_m32bcst; imm8 +:VPSRAD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=4 ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { XmmResult = vpsrad_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask32; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81368 -:VPSRAD vexVVVV_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=4 ... & YmmReg2_m256_m32bcst; imm8 +:VPSRAD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=4 ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { YmmResult = vpsrad_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask32; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } @@ -5033,20 +5033,20 @@ define pcodeop vpsrad_avx512f ; # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81374 define pcodeop vpsraq_avx512vl ; -:VPSRAQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xE2; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 +:VPSRAQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xE2; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsraq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsraq_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81377 -:VPSRAQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xE2; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128 +:VPSRAQ YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xE2; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsraq_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsraq_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -5064,23 +5064,23 @@ define pcodeop vpsraq_avx512f ; } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81383 -:VPSRAQ vexVVVV_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=4 ... & XmmReg2_m128_m64bcst; imm8 +:VPSRAQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=4 ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { XmmResult = vpsraq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask64; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81386 -:VPSRAQ vexVVVV_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=4 ... & YmmReg2_m256_m64bcst; imm8 +:VPSRAQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=4 ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { YmmResult = vpsraq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask64; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81389 @@ -5095,19 +5095,19 @@ define pcodeop vpsraq_avx512f ; # PSRLDQ 4-455 PAGE 1575 LINE 81879 define pcodeop vpsrldq_avx512vl ; -:VPSRLDQ vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_ZmmReg); byte=0x73; reg_opcode=3 ... & XmmReg2_m128; imm8 +:VPSRLDQ evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=3 ... 
& XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp:64 = vpsrldq_avx512vl( XmmReg2_m128, imm8:1 ); - vexVVVV_ZmmReg = zext(tmp); + evexV5_ZmmReg = zext(tmp); } # PSRLDQ 4-455 PAGE 1575 LINE 81881 -:VPSRLDQ vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_YmmReg & vexVVVV_ZmmReg); byte=0x73; reg_opcode=3 ... & YmmReg2_m256; imm8 +:VPSRLDQ evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=3 ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp:64 = vpsrldq_avx512vl( YmmReg2_m256, imm8:1 ); - vexVVVV_ZmmReg = zext(tmp); + evexV5_ZmmReg = zext(tmp); } # PSRLDQ 4-455 PAGE 1575 LINE 81883 @@ -5120,20 +5120,20 @@ define pcodeop vpsrldq_avx512bw ; # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82059 define pcodeop vpsrlw_avx512vl ; -:VPSRLW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPSRLW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsrlw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsrlw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82062 -:VPSRLW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128 +:VPSRLW YmmReg1^YmmOpMask16, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsrlw_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsrlw_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -5151,23 +5151,23 @@ define pcodeop vpsrlw_avx512bw ; } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82068 -:VPSRLW vexVVVV_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=2 ... & XmmReg2_m128; imm8 +:VPSRLW evexV5_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=2 ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpsrlw_avx512vl( XmmReg2_m128, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask16; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82070 -:VPSRLW vexVVVV_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=2 ... 
& YmmReg2_m256; imm8 +:VPSRLW evexV5_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=2 ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpsrlw_avx512vl( YmmReg2_m256, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask16; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82072 @@ -5182,20 +5182,20 @@ define pcodeop vpsrlw_avx512bw ; # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82074 define pcodeop vpsrld_avx512vl ; -:VPSRLD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xD2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 +:VPSRLD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xD2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsrld_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsrld_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82077 -:VPSRLD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xD2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 +:VPSRLD YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xD2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsrld_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsrld_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -5213,23 +5213,23 @@ define pcodeop vpsrld_avx512f ; } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82084 -:VPSRLD vexVVVV_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=2 ... & XmmReg2_m128_m32bcst; imm8 +:VPSRLD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=2 ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { XmmResult = vpsrld_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask32; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82088 -:VPSRLD vexVVVV_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=2 ... & YmmReg2_m256_m32bcst; imm8 +:VPSRLD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=2 ... 
& YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { YmmResult = vpsrld_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask32; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82091 @@ -5244,20 +5244,20 @@ define pcodeop vpsrld_avx512f ; # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82094 define pcodeop vpsrlq_avx512vl ; -:VPSRLQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xD3; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 +:VPSRLQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xD3; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - XmmResult = vpsrlq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsrlq_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82097 -:VPSRLQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xD3; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128 +:VPSRLQ YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xD3; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128) { - YmmResult = vpsrlq_avx512vl( vexVVVV_YmmReg, XmmReg2_m128 ); + YmmResult = vpsrlq_avx512vl( evexV5_YmmReg, XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -5275,23 +5275,23 @@ define pcodeop vpsrlq_avx512f ; } # PSRLW/PSRLD/PSRLQ 4-459 PAGE 1579 LINE 82115 -:VPSRLQ vexVVVV_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask64; byte=0x73; reg_opcode=2 ... & XmmReg2_m128_m64bcst; imm8 +:VPSRLQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x73; reg_opcode=2 ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { XmmResult = vpsrlq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask64; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PSRLW/PSRLD/PSRLQ 4-459 PAGE 1579 LINE 82119 -:VPSRLQ vexVVVV_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask64; byte=0x73; reg_opcode=2 ... & YmmReg2_m256_m64bcst; imm8 +:VPSRLQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x73; reg_opcode=2 ... 
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { YmmResult = vpsrlq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask64; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PSRLW/PSRLD/PSRLQ 4-459 PAGE 1579 LINE 82122 @@ -5306,20 +5306,20 @@ define pcodeop vpsrlq_avx512f ; # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82702 define pcodeop vpsubb_avx512vl ; -:VPSUBB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 +:VPSUBB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpsubb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsubb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82705 -:VPSUBB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPSUBB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpsubb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpsubb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -5338,20 +5338,20 @@ define pcodeop vpsubb_avx512bw ; # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82711 define pcodeop vpsubw_avx512vl ; -:VPSUBW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPSUBW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpsubw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsubw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82714 -:VPSUBW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPSUBW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpsubw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpsubw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask16;
   ZmmReg1 = zext(YmmResult);
@@ -5370,20 +5370,20 @@ define pcodeop vpsubw_avx512bw ;

 # PSUBB/PSUBW/PSUBD 4-470 PAGE 1590 LINE 82733
 define pcodeop vpsubd_avx512vl ;
-:VPSUBD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xFA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VPSUBD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xFA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpsubd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vpsubd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # PSUBB/PSUBW/PSUBD 4-470 PAGE 1590 LINE 82736
-:VPSUBD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xFA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VPSUBD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xFA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpsubd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vpsubd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -5402,20 +5402,20 @@ define pcodeop vpsubd_avx512f ;

 # PSUBQ 4-476 PAGE 1596 LINE 83111
 define pcodeop vpsubq_avx512vl ;
-:VPSUBQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xFB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VPSUBQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xFB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpsubq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vpsubq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # PSUBQ 4-476 PAGE 1596 LINE 83114
-:VPSUBQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xFB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VPSUBQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xFB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpsubq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vpsubq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -5434,20 +5434,20 @@ define pcodeop vpsubq_avx512f ;

 # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83270
 define pcodeop vpsubsb_avx512vl ;
-:VPSUBSB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPSUBSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpsubsb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpsubsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
 }

 # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83274
-:VPSUBSB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
+:VPSUBSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpsubsb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpsubsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask8;
   ZmmReg1 = zext(YmmResult);
@@ -5466,20 +5466,20 @@ define pcodeop vpsubsb_avx512bw ;

 # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83282
 define pcodeop vpsubsw_avx512vl ;
-:VPSUBSW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
+:VPSUBSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpsubsw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpsubsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask16;
   ZmmReg1 = zext(XmmResult);
 }

 # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83286
-:VPSUBSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
+:VPSUBSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpsubsw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpsubsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask16;
   ZmmReg1 = zext(YmmResult);
@@ -5496,20 +5496,20 @@ define pcodeop psubsw_avx512bw ;

 # PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83510
 define pcodeop vpsubusb_avx512vl ;
-:VPSUBUSB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPSUBUSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpsubusb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpsubusb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
 }

 # PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83514
-:VPSUBUSB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
+:VPSUBUSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpsubusb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpsubusb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask8;
   ZmmReg1 = zext(YmmResult);
@@ -5528,20 +5528,20 @@ define pcodeop vpsubusb_avx512bw ;

 # PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83522
 define pcodeop vpsubusw_avx512vl ;
-:VPSUBUSW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
+:VPSUBUSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpsubusw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpsubusw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask16;
   ZmmReg1 = zext(XmmResult);
 }

 # PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83526
-:VPSUBUSW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
+:VPSUBUSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpsubusw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpsubusw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask16;
   ZmmReg1 = zext(YmmResult);
@@ -5560,10 +5560,10 @@ define pcodeop vpsubusw_avx512bw ;

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83948
 define pcodeop vpunpckhbw_avx512vl ;
-:VPUNPCKHBW XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x68; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPUNPCKHBW XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x68; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpunpckhbw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpunpckhbw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
@@ -5571,10 +5571,10 @@ define pcodeop vpunpckhbw_avx512vl ;

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83952
 define pcodeop vpunpckhwd_avx512vl ;
-:VPUNPCKHWD XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x69; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
+:VPUNPCKHWD XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x69; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpunpckhwd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpunpckhwd_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask16;
   ZmmReg1 = zext(XmmResult);
@@ -5582,10 +5582,10 @@ define pcodeop vpunpckhwd_avx512vl ;

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83955
 define pcodeop vpunpckhdq_avx512vl ;
-:VPUNPCKHDQ XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x6A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VPUNPCKHDQ XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x6A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpunpckhdq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vpunpckhdq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -5593,50 +5593,50 @@ define pcodeop vpunpckhdq_avx512vl ;

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83958
 define pcodeop vpunpckhqdq_avx512vl ;
-:VPUNPCKHQDQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x6D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VPUNPCKHQDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x6D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpunpckhqdq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vpunpckhqdq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83974
-:VPUNPCKHBW YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x68; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
+:VPUNPCKHBW YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x68; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpunpckhbw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpunpckhbw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask8;
   ZmmReg1 = zext(YmmResult);
 }

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83977
-:VPUNPCKHWD YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x69; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
+:VPUNPCKHWD YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x69; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpunpckhwd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpunpckhwd_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask16;
   ZmmReg1 = zext(YmmResult);
 }

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83980
-:VPUNPCKHDQ YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x6A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VPUNPCKHDQ YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x6A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpunpckhdq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vpunpckhdq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
 }

 # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83984
-:VPUNPCKHQDQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x6D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VPUNPCKHQDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x6D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpunpckhqdq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vpunpckhqdq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -5688,10 +5688,10 @@ define pcodeop vpunpckhqdq_avx512f ;

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84553
 define pcodeop vpunpcklbw_avx512vl ;
-:VPUNPCKLBW XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x60; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
+:VPUNPCKLBW XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x60; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpunpcklbw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpunpcklbw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask8;
   ZmmReg1 = zext(XmmResult);
@@ -5699,10 +5699,10 @@ define pcodeop vpunpcklbw_avx512vl ;

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84556
 define pcodeop vpunpcklwd_avx512vl ;
-:VPUNPCKLWD XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x61; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
+:VPUNPCKLWD XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x61; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vpunpcklwd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 );
+  XmmResult = vpunpcklwd_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
   XmmMask = XmmReg1;
   build XmmOpMask16;
   ZmmReg1 = zext(XmmResult);
@@ -5710,10 +5710,10 @@ define pcodeop vpunpcklwd_avx512vl ;

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84559
 define pcodeop vpunpckldq_avx512vl ;
-:VPUNPCKLDQ XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x62; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VPUNPCKLDQ XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x62; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpunpckldq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vpunpckldq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -5721,50 +5721,50 @@ define pcodeop vpunpckldq_avx512vl ;

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84562
 define pcodeop vpunpcklqdq_avx512vl ;
-:VPUNPCKLQDQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x6C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VPUNPCKLQDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x6C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpunpcklqdq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vpunpcklqdq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84578
-:VPUNPCKLBW YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x60; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
+:VPUNPCKLBW YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x60; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpunpcklbw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpunpcklbw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask8;
   ZmmReg1 = zext(YmmResult);
 }

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84581
-:VPUNPCKLWD YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x61; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
+:VPUNPCKLWD YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x61; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vpunpcklwd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 );
+  YmmResult = vpunpcklwd_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
   YmmMask = YmmReg1;
   build YmmOpMask16;
   ZmmReg1 = zext(YmmResult);
 }

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84584
-:VPUNPCKLDQ YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x62; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VPUNPCKLDQ YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x62; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpunpckldq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vpunpckldq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
 }

 # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84587
-:VPUNPCKLQDQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x6C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VPUNPCKLQDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x6C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpunpcklqdq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vpunpcklqdq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -5816,20 +5816,20 @@ define pcodeop vpunpcklqdq_avx512f ;

 # PXOR 4-518 PAGE 1638 LINE 85503
 define pcodeop vpxord_avx512vl ;
-:VPXORD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xEF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VPXORD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xEF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpxord_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vpxord_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # PXOR 4-518 PAGE 1638 LINE 85505
-:VPXORD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xEF; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VPXORD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xEF; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpxord_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vpxord_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -5848,20 +5848,20 @@ define pcodeop vpxord_avx512f ;

 # PXOR 4-518 PAGE 1638 LINE 85514
 define pcodeop vpxorq_avx512vl ;
-:VPXORQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xEF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VPXORQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xEF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vpxorq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vpxorq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # PXOR 4-518 PAGE 1638 LINE 85521
-:VPXORQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xEF; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VPXORQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xEF; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vpxorq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vpxorq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -5880,20 +5880,20 @@ define pcodeop vpxorq_avx512f ;

 # SHUFPD 4-617 PAGE 1737 LINE 90231
 define pcodeop vshufpd_avx512vl ;
-:VSHUFPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8
+:VSHUFPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vshufpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
+  XmmResult = vshufpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # SHUFPD 4-617 PAGE 1737 LINE 90235
-:VSHUFPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
+:VSHUFPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vshufpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
+  YmmResult = vshufpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -5912,20 +5912,20 @@ define pcodeop vshufpd_avx512f ;

 # SHUFPS 4-622 PAGE 1742 LINE 90489
 define pcodeop vshufps_avx512vl ;
-:VSHUFPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
+:VSHUFPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vshufps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8:1 );
+  XmmResult = vshufps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # SHUFPS 4-622 PAGE 1742 LINE 90493
-:VSHUFPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
+:VSHUFPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vshufps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
+  YmmResult = vshufps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -6008,10 +6008,10 @@ define pcodeop vsqrtps_avx512f ;

 # SQRTSD 4-638 PAGE 1758 LINE 91276
 define pcodeop vsqrtsd_avx512f ;
-:VSQRTSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
+:VSQRTSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vsqrtsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vsqrtsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
@@ -6019,10 +6019,10 @@ define pcodeop vsqrtsd_avx512f ;

 # SQRTSS 4-640 PAGE 1760 LINE 91371
 define pcodeop vsqrtss_avx512f ;
-:VSQRTSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
+:VSQRTSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vsqrtss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vsqrtss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -6030,20 +6030,20 @@ define pcodeop vsqrtss_avx512f ;

 # SUBPD 4-656 PAGE 1776 LINE 92120
 define pcodeop vsubpd_avx512vl ;
-:VSUBPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VSUBPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vsubpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vsubpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # SUBPD 4-656 PAGE 1776 LINE 92123
-:VSUBPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VSUBPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vsubpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vsubpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -6062,20 +6062,20 @@ define pcodeop vsubpd_avx512f ;

 # SUBPS 4-659 PAGE 1779 LINE 92269
 define pcodeop vsubps_avx512vl ;
-:VSUBPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VSUBPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vsubps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vsubps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # SUBPS 4-659 PAGE 1779 LINE 92272
-:VSUBPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VSUBPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vsubps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vsubps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -6094,10 +6094,10 @@ define pcodeop vsubps_avx512f ;

 # SUBSD 4-662 PAGE 1782 LINE 92421
 define pcodeop vsubsd_avx512f ;
-:VSUBSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
+:VSUBSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vsubsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 );
+  XmmResult = vsubsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
@@ -6105,10 +6105,10 @@ define pcodeop vsubsd_avx512f ;

 # SUBSS 4-664 PAGE 1784 LINE 92514
 define pcodeop vsubss_avx512f ;
-:VSUBSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
+:VSUBSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vsubss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 );
+  XmmResult = vsubss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -6136,20 +6136,20 @@ define pcodeop vucomiss_avx512f ;

 # UNPCKHPD 4-688 PAGE 1808 LINE 93629
 define pcodeop vunpckhpd_avx512vl ;
-:VUNPCKHPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VUNPCKHPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vunpckhpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vunpckhpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # UNPCKHPD 4-688 PAGE 1808 LINE 93632
-:VUNPCKHPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VUNPCKHPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vunpckhpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vunpckhpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -6168,20 +6168,20 @@ define pcodeop vunpckhpd_avx512f ;

 # UNPCKHPS 4-692 PAGE 1812 LINE 93813
 define pcodeop vunpckhps_avx512vl ;
-:VUNPCKHPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VUNPCKHPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vunpckhps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vunpckhps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # UNPCKHPS 4-692 PAGE 1812 LINE 93817
-:VUNPCKHPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VUNPCKHPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vunpckhps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vunpckhps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -6200,20 +6200,20 @@ define pcodeop vunpckhps_avx512f ;

 # UNPCKLPD 4-696 PAGE 1816 LINE 94045
 define pcodeop vunpcklpd_avx512vl ;
-:VUNPCKLPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VUNPCKLPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vunpcklpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vunpcklpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # UNPCKLPD 4-696 PAGE 1816 LINE 94048
-:VUNPCKLPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VUNPCKLPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vunpcklpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vunpcklpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -6232,20 +6232,20 @@ define pcodeop vunpcklpd_avx512f ;

 # UNPCKLPS 4-700 PAGE 1820 LINE 94231
 define pcodeop vunpcklps_avx512vl ;
-:VUNPCKLPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VUNPCKLPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vunpcklps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vunpcklps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # UNPCKLPS 4-700 PAGE 1820 LINE 94234
-:VUNPCKLPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VUNPCKLPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vunpcklps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vunpcklps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -6264,10 +6264,10 @@ define pcodeop vunpcklps_avx512f ;

 # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94615
 define pcodeop valignd_avx512vl ;
-:VALIGND XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x03; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VALIGND XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x03; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = valignd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = valignd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -6275,30 +6275,30 @@ define pcodeop valignd_avx512vl ;

 # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94619
 define pcodeop valignq_avx512vl ;
-:VALIGNQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x03; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VALIGNQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x03; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = valignq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = valignq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94623
-:VALIGND YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x03; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VALIGND YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x03; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = valignd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = valignd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
 }

 # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94627
-:VALIGNQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x03; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VALIGNQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x03; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = valignq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = valignq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -6328,20 +6328,20 @@ define pcodeop valignq_avx512f ;

 # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94787
 define pcodeop vblendmpd_avx512vl ;
-:VBLENDMPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x65; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VBLENDMPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x65; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vblendmpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst );
+  XmmResult = vblendmpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94790
-:VBLENDMPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x65; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
+:VBLENDMPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x65; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vblendmpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst );
+  YmmResult = vblendmpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -6360,20 +6360,20 @@ define pcodeop vblendmpd_avx512f ;

 # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94796
 define pcodeop vblendmps_avx512vl ;
-:VBLENDMPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x65; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
+:VBLENDMPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x65; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vblendmps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst );
+  XmmResult = vblendmps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94799
-:VBLENDMPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x65; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
+:VBLENDMPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x65; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vblendmps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst );
+  YmmResult = vblendmps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -7385,58 +7385,58 @@ define pcodeop vcvtuqq2ps_avx512dq ;

 # VCVTUSI2SD 5-81 PAGE 1905 LINE 98308
 define pcodeop vcvtusi2sd_avx512f ;
-:VCVTUSI2SD XmmReg1, vexVVVV_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32
+:VCVTUSI2SD XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtusi2sd_avx512f( vexVVVV_XmmReg, rm32 );
+  local tmp:16 = vcvtusi2sd_avx512f( evexV5_XmmReg, rm32 );
   ZmmReg1 = zext(tmp);
 }

 # VCVTUSI2SD 5-81 PAGE 1905 LINE 98311
 @ifdef IA64
-:VCVTUSI2SD XmmReg1, vexVVVV_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64
+:VCVTUSI2SD XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtusi2sd_avx512f( vexVVVV_XmmReg, rm64 );
+  local tmp:16 = vcvtusi2sd_avx512f( evexV5_XmmReg, rm64 );
   ZmmReg1 = zext(tmp);
 }
 @endif

 # VCVTUSI2SS 5-83 PAGE 1907 LINE 98381
 define pcodeop vcvtusi2ss_avx512f ;
-:VCVTUSI2SS XmmReg1, vexVVVV_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32
+:VCVTUSI2SS XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtusi2ss_avx512f( vexVVVV_XmmReg, rm32 );
+  local tmp:16 = vcvtusi2ss_avx512f( evexV5_XmmReg, rm32 );
   ZmmReg1 = zext(tmp);
 }

 # VCVTUSI2SS 5-83 PAGE 1907 LINE 98383
 @ifdef IA64
-:VCVTUSI2SS XmmReg1, vexVVVV_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64
+:VCVTUSI2SS XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  local tmp:16 = vcvtusi2ss_avx512f( vexVVVV_XmmReg, rm64 );
+  local tmp:16 = vcvtusi2ss_avx512f( evexV5_XmmReg, rm64 );
   ZmmReg1 = zext(tmp);
 }
 @endif

 # VDBPSADBW 5-85 PAGE 1909 LINE 98455
 define pcodeop vdbpsadbw_avx512vl ;
-:VDBPSADBW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x42; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128; imm8
+:VDBPSADBW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x42; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128; imm8
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  XmmResult = vdbpsadbw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 );
+  XmmResult = vdbpsadbw_avx512vl( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
   XmmMask = XmmReg1;
   build XmmOpMask16;
   ZmmReg1 = zext(XmmResult);
 }

 # VDBPSADBW 5-85 PAGE 1909 LINE 98460
-:VDBPSADBW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x42; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256; imm8
+:VDBPSADBW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x42; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256; imm8
 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
 {
-  YmmResult = vdbpsadbw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 );
+  YmmResult = vdbpsadbw_avx512vl( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
   YmmMask = YmmReg1;
   build YmmOpMask16;
   ZmmReg1 = zext(YmmResult);
@@ -7782,20 +7782,20 @@ define pcodeop vextracti64x4_avx512f ;
 }
 # VFIXUPIMMPD 5-112 PAGE 1936 LINE 99754
 define pcodeop vfixupimmpd_avx512vl ;
-:VFIXUPIMMPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8
+:VFIXUPIMMPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vfixupimmpd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
+  XmmResult = vfixupimmpd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
 }

 # VFIXUPIMMPD 5-112 PAGE 1936 LINE 99757
-:VFIXUPIMMPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
+:VFIXUPIMMPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vfixupimmpd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
+  YmmResult = vfixupimmpd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
   YmmMask = YmmReg1;
   build YmmOpMask64;
   ZmmReg1 = zext(YmmResult);
@@ -7814,20 +7814,20 @@ define pcodeop vfixupimmpd_avx512f ;

 # VFIXUPIMMPS 5-116 PAGE 1940 LINE 99957
 define pcodeop vfixupimmps_avx512vl ;
-:VFIXUPIMMPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
+:VFIXUPIMMPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  XmmResult = vfixupimmps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8:1 );
+  XmmResult = vfixupimmps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
 }

 # VFIXUPIMMPS 5-116 PAGE 1940 LINE 99960
-:VFIXUPIMMPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
+:VFIXUPIMMPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
 {
-  YmmResult = vfixupimmps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
+  YmmResult = vfixupimmps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
   YmmMask = YmmReg1;
   build YmmOpMask32;
   ZmmReg1 = zext(YmmResult);
@@ -7846,10 +7846,10 @@ define pcodeop vfixupimmps_avx512f ;

 # VFIXUPIMMSD 5-120 PAGE 1944 LINE 100159
 define pcodeop vfixupimmsd_avx512f ;
-:VFIXUPIMMSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8
+:VFIXUPIMMSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vfixupimmsd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64, imm8:1 );
+  XmmResult = vfixupimmsd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64, imm8:1 );
   XmmMask = XmmReg1;
   build XmmOpMask64;
   ZmmReg1 = zext(XmmResult);
@@ -7857,10 +7857,10 @@ define pcodeop vfixupimmsd_avx512f ;

 # VFIXUPIMMSS 5-123 PAGE 1947 LINE 100331
 define pcodeop vfixupimmss_avx512f ;
-:VFIXUPIMMSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8
+:VFIXUPIMMSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8
 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
 {
-  XmmResult = vfixupimmss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32, imm8:1 );
+  XmmResult = vfixupimmss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32, imm8:1 );
   XmmMask = XmmReg1;
   build XmmOpMask32;
   ZmmReg1 = zext(XmmResult);
@@ -7868,9 +7868,9 @@ define pcodeop vfixupimmss_avx512f ;

 # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100523
 define pcodeop vfmadd132pd_avx512vl ;
-:VFMADD132PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
+:VFMADD132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
& XmmReg2_m128_m64bcst { - XmmResult = vfmadd132pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmadd132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -7878,10 +7878,10 @@ define pcodeop vfmadd132pd_avx512vl ; # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100526 define pcodeop vfmadd213pd_avx512vl ; -:VFMADD213PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMADD213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmadd213pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmadd213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -7889,40 +7889,40 @@ define pcodeop vfmadd213pd_avx512vl ; # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100529 define pcodeop vfmadd231pd_avx512vl ; -:VFMADD231PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMADD231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmadd231pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmadd231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100532 -:VFMADD132PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMADD132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmadd132pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmadd132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100535 -:VFMADD213PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMADD213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmadd213pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmadd213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100538 -:VFMADD231PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMADD231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmadd231pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmadd231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -7963,10 +7963,10 @@ define pcodeop vfmadd231pd_avx512f ; # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100884 define pcodeop vfmadd132ps_avx512vl ; -:VFMADD132PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMADD132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmadd132ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmadd132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -7974,10 +7974,10 @@ define pcodeop vfmadd132ps_avx512vl ; # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100887 define pcodeop vfmadd213ps_avx512vl ; -:VFMADD213PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMADD213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmadd213ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmadd213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -7985,40 +7985,40 @@ define pcodeop vfmadd213ps_avx512vl ; # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100890 define pcodeop vfmadd231ps_avx512vl ; -:VFMADD231PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst +:VFMADD231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmadd231ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmadd231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100893 -:VFMADD132PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMADD132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmadd132ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmadd132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100896 -:VFMADD213PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMADD213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmadd213ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmadd213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100899 -:VFMADD231PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMADD231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmadd231ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmadd231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -8059,10 +8059,10 @@ define pcodeop vfmadd231ps_avx512f ; # VFMADD132SD/VFMADD213SD/VFMADD231SD 5-140 PAGE 1964 LINE 101235 define pcodeop vfmadd132sd_avx512f ; -:VFMADD132SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 +:VFMADD132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmadd132sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfmadd132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8070,10 +8070,10 @@ define pcodeop vfmadd132sd_avx512f ; # VFMADD132SD/VFMADD213SD/VFMADD231SD 5-140 PAGE 1964 LINE 101238 define pcodeop vfmadd213sd_avx512f ; -:VFMADD213SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFMADD213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmadd213sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfmadd213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8081,10 +8081,10 @@ define pcodeop vfmadd213sd_avx512f ; # VFMADD132SD/VFMADD213SD/VFMADD231SD 5-140 PAGE 1964 LINE 101241 define pcodeop vfmadd231sd_avx512f ; -:VFMADD231SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFMADD231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmadd231sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfmadd231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8092,10 +8092,10 @@ define pcodeop vfmadd231sd_avx512f ; # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-143 PAGE 1967 LINE 101403 define pcodeop vfmadd132ss_avx512f ; -:VFMADD132SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFMADD132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmadd132ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfmadd132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8103,10 +8103,10 @@ define pcodeop vfmadd132ss_avx512f ; # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-143 PAGE 1967 LINE 101406 define pcodeop vfmadd213ss_avx512f ; -:VFMADD213SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m32 +:VFMADD213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmadd213ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfmadd213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8114,10 +8114,10 @@ define pcodeop vfmadd213ss_avx512f ; # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-143 PAGE 1967 LINE 101409 define pcodeop vfmadd231ss_avx512f ; -:VFMADD231SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFMADD231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmadd231ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfmadd231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8125,10 +8125,10 @@ define pcodeop vfmadd231ss_avx512f ; # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101585 define pcodeop vfmaddsub213pd_avx512vl ; -:VFMADDSUB213PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMADDSUB213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmaddsub213pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmaddsub213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8136,10 +8136,10 @@ define pcodeop vfmaddsub213pd_avx512vl ; # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101589 define pcodeop vfmaddsub231pd_avx512vl ; -:VFMADDSUB231PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMADDSUB231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmaddsub231pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmaddsub231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8147,40 +8147,40 @@ define pcodeop vfmaddsub231pd_avx512vl ; # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101593 define pcodeop vfmaddsub132pd_avx512vl ; -:VFMADDSUB132PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMADDSUB132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmaddsub132pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmaddsub132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101597 -:VFMADDSUB213PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMADDSUB213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmaddsub213pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmaddsub213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101601 -:VFMADDSUB231PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMADDSUB231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmaddsub231pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmaddsub231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101605 -:VFMADDSUB132PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMADDSUB132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmaddsub132pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmaddsub132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -8221,10 +8221,10 @@ define pcodeop vfmaddsub132pd_avx512f ; # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102024 define pcodeop vfmaddsub213ps_avx512vl ; -:VFMADDSUB213PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMADDSUB213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmaddsub213ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmaddsub213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8232,10 +8232,10 @@ define pcodeop vfmaddsub213ps_avx512vl ; # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102028 define pcodeop vfmaddsub231ps_avx512vl ; -:VFMADDSUB231PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMADDSUB231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmaddsub231ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmaddsub231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8243,40 +8243,40 @@ define pcodeop vfmaddsub231ps_avx512vl ; # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102031 define pcodeop vfmaddsub132ps_avx512vl ; -:VFMADDSUB132PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMADDSUB132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmaddsub132ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmaddsub132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102034 -:VFMADDSUB213PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst +:VFMADDSUB213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmaddsub213ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmaddsub213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102038 -:VFMADDSUB231PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMADDSUB231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmaddsub231ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmaddsub231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102041 -:VFMADDSUB132PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMADDSUB132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmaddsub132ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmaddsub132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -8317,10 +8317,10 @@ define pcodeop vfmaddsub132ps_avx512f ; # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102454 define pcodeop vfmsubadd132pd_avx512vl ; -:VFMSUBADD132PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMSUBADD132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsubadd132pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmsubadd132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8328,10 +8328,10 @@ define pcodeop vfmsubadd132pd_avx512vl ; # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102458 define pcodeop vfmsubadd213pd_avx512vl ; -:VFMSUBADD213PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMSUBADD213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsubadd213pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmsubadd213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8339,40 +8339,40 @@ define pcodeop vfmsubadd213pd_avx512vl ; # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102462 define pcodeop vfmsubadd231pd_avx512vl ; -:VFMSUBADD231PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMSUBADD231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsubadd231pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmsubadd231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102466 -:VFMSUBADD132PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMSUBADD132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsubadd132pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmsubadd132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102470 -:VFMSUBADD213PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst +:VFMSUBADD213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsubadd213pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmsubadd213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102474 -:VFMSUBADD231PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMSUBADD231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsubadd231pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmsubadd231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -8413,10 +8413,10 @@ define pcodeop vfmsubadd231pd_avx512f ; # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102894 define pcodeop vfmsubadd132ps_avx512vl ; -:VFMSUBADD132PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMSUBADD132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsubadd132ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmsubadd132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8424,10 +8424,10 @@ define pcodeop vfmsubadd132ps_avx512vl ; # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102897 define pcodeop vfmsubadd213ps_avx512vl ; -:VFMSUBADD213PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMSUBADD213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsubadd213ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmsubadd213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8435,40 +8435,40 @@ define pcodeop vfmsubadd213ps_avx512vl ; # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102901 define pcodeop vfmsubadd231ps_avx512vl ; -:VFMSUBADD231PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMSUBADD231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsubadd231ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmsubadd231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102904 -:VFMSUBADD132PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMSUBADD132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsubadd132ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmsubadd132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102907 -:VFMSUBADD213PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMSUBADD213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsubadd213ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmsubadd213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102911 -:VFMSUBADD231PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMSUBADD231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsubadd231ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmsubadd231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -8509,10 +8509,10 @@ define pcodeop vfmsubadd231ps_avx512f ; # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103332 define pcodeop vfmsub132pd_avx512vl ; -:VFMSUB132PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMSUB132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsub132pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmsub132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8520,10 +8520,10 @@ define pcodeop vfmsub132pd_avx512vl ; # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103335 define pcodeop vfmsub213pd_avx512vl ; -:VFMSUB213PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMSUB213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsub213pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmsub213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8531,40 +8531,40 @@ define pcodeop vfmsub213pd_avx512vl ; # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103338 define pcodeop vfmsub231pd_avx512vl ; -:VFMSUB231PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFMSUB231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsub231pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfmsub231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103341 -:VFMSUB132PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst +:VFMSUB132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsub132pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmsub132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103344 -:VFMSUB213PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMSUB213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsub213pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmsub213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103347 -:VFMSUB231PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFMSUB231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsub231pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfmsub231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -8605,10 +8605,10 @@ define pcodeop vfmsub231pd_avx512f ; # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103692 define pcodeop vfmsub132ps_avx512vl ; -:VFMSUB132PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMSUB132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsub132ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmsub132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8616,10 +8616,10 @@ define pcodeop vfmsub132ps_avx512vl ; # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103695 define pcodeop vfmsub213ps_avx512vl ; -:VFMSUB213PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst +:VFMSUB213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsub213ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmsub213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8627,40 +8627,40 @@ define pcodeop vfmsub213ps_avx512vl ; # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103698 define pcodeop vfmsub231ps_avx512vl ; -:VFMSUB231PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFMSUB231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfmsub231ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfmsub231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103701 -:VFMSUB132PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMSUB132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsub132ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmsub132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103704 -:VFMSUB213PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFMSUB213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsub213ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmsub213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103707 -:VFMSUB231PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst +:VFMSUB231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfmsub231ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfmsub231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -8701,10 +8701,10 @@ define pcodeop vfmsub231ps_avx512f ; # VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-199 PAGE 2023 LINE 104042 define pcodeop vfmsub132sd_avx512f ; -:VFMSUB132SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFMSUB132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmsub132sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfmsub132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8712,10 +8712,10 @@ define pcodeop vfmsub132sd_avx512f ; # VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-199 PAGE 2023 LINE 104045 define pcodeop vfmsub213sd_avx512f ; -:VFMSUB213SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFMSUB213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmsub213sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfmsub213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8723,10 +8723,10 @@ define pcodeop vfmsub213sd_avx512f ; # VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-199 PAGE 2023 LINE 104048 define pcodeop vfmsub231sd_avx512f ; -:VFMSUB231SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFMSUB231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmsub231sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfmsub231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8734,10 +8734,10 @@ define pcodeop vfmsub231sd_avx512f ; # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-202 PAGE 2026 LINE 104217 define pcodeop vfmsub132ss_avx512f ; -:VFMSUB132SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m32 +:VFMSUB132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmsub132ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfmsub132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8745,10 +8745,10 @@ define pcodeop vfmsub132ss_avx512f ; # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-202 PAGE 2026 LINE 104220 define pcodeop vfmsub213ss_avx512f ; -:VFMSUB213SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFMSUB213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmsub213ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfmsub213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8756,10 +8756,10 @@ define pcodeop vfmsub213ss_avx512f ; # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-202 PAGE 2026 LINE 104223 define pcodeop vfmsub231ss_avx512f ; -:VFMSUB231SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFMSUB231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfmsub231ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfmsub231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8767,10 +8767,10 @@ define pcodeop vfmsub231ss_avx512f ; # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104401 define pcodeop vfnmadd132pd_avx512vl ; -:VFNMADD132PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFNMADD132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmadd132pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfnmadd132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8778,10 +8778,10 @@ define pcodeop vfnmadd132pd_avx512vl ; # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104405 define pcodeop vfnmadd213pd_avx512vl ; -:VFNMADD213PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst +:VFNMADD213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmadd213pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfnmadd213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8789,40 +8789,40 @@ define pcodeop vfnmadd213pd_avx512vl ; # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104408 define pcodeop vfnmadd231pd_avx512vl ; -:VFNMADD231PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFNMADD231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmadd231pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfnmadd231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104412 -:VFNMADD132PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFNMADD132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmadd132pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfnmadd132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104416 -:VFNMADD213PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFNMADD213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmadd213pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfnmadd213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104419 -:VFNMADD231PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst +:VFNMADD231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmadd231pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfnmadd231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -8863,10 +8863,10 @@ define pcodeop vfnmadd231pd_avx512f ; # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104760 define pcodeop vfnmadd132ps_avx512vl ; -:VFNMADD132PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFNMADD132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmadd132ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfnmadd132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8874,10 +8874,10 @@ define pcodeop vfnmadd132ps_avx512vl ; # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104763 define pcodeop vfnmadd213ps_avx512vl ; -:VFNMADD213PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFNMADD213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmadd213ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfnmadd213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -8885,40 +8885,40 @@ define pcodeop vfnmadd213ps_avx512vl ; # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104766 define pcodeop vfnmadd231ps_avx512vl ; -:VFNMADD231PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFNMADD231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmadd231ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfnmadd231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104769 -:VFNMADD132PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFNMADD132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmadd132ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfnmadd132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104772 -:VFNMADD213PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFNMADD213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmadd213ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfnmadd213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104775 -:VFNMADD231PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFNMADD231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmadd231ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfnmadd231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -8958,10 +8958,10 @@ define pcodeop vfnmadd231ps_avx512f ; # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-218 PAGE 2042 LINE 105098 define pcodeop vfnmadd132sd_avx512f ; -:VFNMADD132SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFNMADD132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmadd132sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfnmadd132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8969,10 +8969,10 @@ define pcodeop vfnmadd132sd_avx512f ; # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-218 PAGE 2042 LINE 105101 define pcodeop vfnmadd213sd_avx512f ; -:VFNMADD213SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFNMADD213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmadd213sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfnmadd213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8980,10 +8980,10 @@ define pcodeop vfnmadd213sd_avx512f ; # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-218 PAGE 2042 LINE 105104 define pcodeop vfnmadd231sd_avx512f ; -:VFNMADD231SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFNMADD231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmadd231sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfnmadd231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -8991,10 +8991,10 @@ define pcodeop vfnmadd231sd_avx512f ; # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-221 PAGE 2045 LINE 105270 define pcodeop vfnmadd132ss_avx512f ; -:VFNMADD132SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFNMADD132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmadd132ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfnmadd132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9002,10 +9002,10 @@ define pcodeop vfnmadd132ss_avx512f ; # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-221 PAGE 2045 LINE 105273 define pcodeop vfnmadd213ss_avx512f ; -:VFNMADD213SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFNMADD213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmadd213ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfnmadd213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9013,10 +9013,10 @@ define pcodeop vfnmadd213ss_avx512f ; # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-221 PAGE 2045 LINE 105276 define pcodeop vfnmadd231ss_avx512f ; -:VFNMADD231SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFNMADD231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmadd231ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfnmadd231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9024,10 +9024,10 @@ define pcodeop vfnmadd231ss_avx512f ; # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105451 define pcodeop vfnmsub132pd_avx512vl ; -:VFNMSUB132PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFNMSUB132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmsub132pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfnmsub132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -9035,10 +9035,10 @@ define pcodeop vfnmsub132pd_avx512vl ; # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105455 define pcodeop vfnmsub213pd_avx512vl ; -:VFNMSUB213PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VFNMSUB213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmsub213pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfnmsub213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -9046,40 +9046,40 @@ define pcodeop vfnmsub213pd_avx512vl ; # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105458 define pcodeop vfnmsub231pd_avx512vl ; -:VFNMSUB231PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst +:VFNMSUB231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmsub231pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vfnmsub231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105462 -:VFNMSUB132PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFNMSUB132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmsub132pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfnmsub132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105466 -:VFNMSUB213PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFNMSUB213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmsub213pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfnmsub213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105469 -:VFNMSUB231PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VFNMSUB231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmsub231pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vfnmsub231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -9120,10 +9120,10 @@ define pcodeop vfnmsub231pd_avx512f ; # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105800 define pcodeop vfnmsub132ps_avx512vl ; -:VFNMSUB132PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst +:VFNMSUB132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmsub132ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfnmsub132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9131,10 +9131,10 @@ define pcodeop vfnmsub132ps_avx512vl ; # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105803 define pcodeop vfnmsub213ps_avx512vl ; -:VFNMSUB213PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFNMSUB213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmsub213ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfnmsub213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9142,40 +9142,40 @@ define pcodeop vfnmsub213ps_avx512vl ; # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105806 define pcodeop vfnmsub231ps_avx512vl ; -:VFNMSUB231PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VFNMSUB231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vfnmsub231ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vfnmsub231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105809 -:VFNMSUB132PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFNMSUB132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmsub132ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfnmsub132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105812 -:VFNMSUB213PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst +:VFNMSUB213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmsub213ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfnmsub213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105815 -:VFNMSUB231PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VFNMSUB231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vfnmsub231ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vfnmsub231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -9216,10 +9216,10 @@ define pcodeop vfnmsub231ps_avx512f ; # VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-236 PAGE 2060 LINE 106135 define pcodeop vfnmsub132sd_avx512f ; -:VFNMSUB132SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFNMSUB132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmsub132sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfnmsub132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -9227,10 +9227,10 @@ define pcodeop vfnmsub132sd_avx512f ; # VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-236 PAGE 2060 LINE 106138 define pcodeop vfnmsub213sd_avx512f ; -:VFNMSUB213SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VFNMSUB213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmsub213sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfnmsub213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -9238,10 +9238,10 @@ define pcodeop vfnmsub213sd_avx512f ; # VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-236 PAGE 2060 LINE 106141 define pcodeop vfnmsub231sd_avx512f ; -:VFNMSUB231SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 +:VFNMSUB231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmsub231sd_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vfnmsub231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -9249,10 +9249,10 @@ define pcodeop vfnmsub231sd_avx512f ; # VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-239 PAGE 2063 LINE 106307 define pcodeop vfnmsub132ss_avx512f ; -:VFNMSUB132SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFNMSUB132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmsub132ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfnmsub132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9260,10 +9260,10 @@ define pcodeop vfnmsub132ss_avx512f ; # VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-239 PAGE 2063 LINE 106310 define pcodeop vfnmsub213ss_avx512f ; -:VFNMSUB213SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFNMSUB213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmsub213ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfnmsub213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9271,10 +9271,10 @@ define pcodeop vfnmsub213ss_avx512f ; # VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-239 PAGE 2063 LINE 106313 define pcodeop vfnmsub231ss_avx512f ; -:VFNMSUB231SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VFNMSUB231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vfnmsub231ss_avx512f( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vfnmsub231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9770,10 +9770,10 @@ define pcodeop vgetexpps_avx512f ; # VGETEXPSD 5-295 PAGE 2119 LINE 108959 define pcodeop vgetexpsd_avx512f ; -:VGETEXPSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 +:VGETEXPSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vgetexpsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vgetexpsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -9781,10 +9781,10 @@ define pcodeop vgetexpsd_avx512f ; # VGETEXPSS 5-297 PAGE 2121 LINE 109037 define pcodeop vgetexpss_avx512f ; -:VGETEXPSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VGETEXPSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vgetexpss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vgetexpss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9856,10 +9856,10 @@ define pcodeop vgetmantps_avx512f ; # VGETMANTSD 5-306 PAGE 2130 LINE 109519 define pcodeop vgetmantsd_avx512f ; -:VGETMANTSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VGETMANTSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vgetmantsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vgetmantsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -9867,10 +9867,10 @@ define pcodeop vgetmantsd_avx512f ; # VGETMANTSS 5-308 PAGE 2132 LINE 109610 define pcodeop vgetmantss_avx512f ; -:VGETMANTSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VGETMANTSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vgetmantss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vgetmantss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -9878,10 +9878,10 @@ define pcodeop vgetmantss_avx512f ; # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109706 define pcodeop vinsertf32x4_avx512vl ; -:VINSERTF32X4 YmmReg1^YmmOpMask32, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128; imm8 +:VINSERTF32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { - YmmResult = vinsertf32x4_avx512vl( vexVVVV_YmmReg, XmmReg2_m128, imm8:1 ); + YmmResult = vinsertf32x4_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -9900,10 +9900,10 @@ define pcodeop vinsertf32x4_avx512f ; # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109712 define pcodeop vinsertf64x2_avx512vl ; -:VINSERTF64X2 YmmReg1^YmmOpMask64, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128; imm8 +:VINSERTF64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { - YmmResult = vinsertf64x2_avx512vl( vexVVVV_YmmReg, XmmReg2_m128, imm8:1 ); + YmmResult = vinsertf64x2_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -9944,10 +9944,10 @@ define pcodeop vinsertf64x4_avx512f ; # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109930 define pcodeop vinserti32x4_avx512vl ; -:VINSERTI32X4 YmmReg1^YmmOpMask32, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128; imm8 +:VINSERTI32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { - YmmResult = vinserti32x4_avx512vl( vexVVVV_YmmReg, XmmReg2_m128, imm8:1 ); + YmmResult = vinserti32x4_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -9966,10 +9966,10 @@ define pcodeop vinserti32x4_avx512f ; # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109936 define pcodeop vinserti64x2_avx512vl ; -:VINSERTI64X2 YmmReg1^YmmOpMask64, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128; imm8 +:VINSERTI64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { - YmmResult = vinserti64x2_avx512vl( vexVVVV_YmmReg, XmmReg2_m128, imm8:1 ); + YmmResult = vinserti64x2_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -10010,20 +10010,20 @@ define pcodeop vinserti64x4_avx512f ; # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110393 define pcodeop vpblendmb_avx512vl ; -:VPBLENDMB XmmReg1^XmmOpMask8, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x66; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... 
& XmmReg2_m128 +:VPBLENDMB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x66; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpblendmb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpblendmb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110396 -:VPBLENDMB YmmReg1^YmmOpMask8, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x66; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 +:VPBLENDMB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x66; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpblendmb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpblendmb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); @@ -10042,20 +10042,20 @@ define pcodeop vpblendmb_avx512bw ; # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110402 define pcodeop vpblendmw_avx512vl ; -:VPBLENDMW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x66; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPBLENDMW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x66; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpblendmw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpblendmw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110405 -:VPBLENDMW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x66; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPBLENDMW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x66; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpblendmw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpblendmw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -10074,20 +10074,20 @@ define pcodeop vpblendmw_avx512bw ; # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110495 define pcodeop vpblendmd_avx512vl ; -:VPBLENDMD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x64; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPBLENDMD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x64; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpblendmd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpblendmd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110498 -:VPBLENDMD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x64; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPBLENDMD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x64; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpblendmd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpblendmd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -10106,20 +10106,20 @@ define pcodeop vpblendmd_avx512f ; # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110504 define pcodeop vpblendmq_avx512vl ; -:VPBLENDMQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x64; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPBLENDMQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x64; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpblendmq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpblendmq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110507 -:VPBLENDMQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x64; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPBLENDMQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x64; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpblendmq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpblendmq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -10502,18 +10502,18 @@ define pcodeop vbroadcasti64x4_avx512f ; # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111259 define pcodeop vpcmpb_avx512vl ; -:VPCMPB KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128 +:VPCMPB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111263 -:VPCMPB KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x3F; KReg_reg ... & YmmReg2_m256 +:VPCMPB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } @@ -10528,18 +10528,18 @@ define pcodeop vpcmpb_avx512bw ; # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111271 define pcodeop vpcmpub_avx512vl ; -:VPCMPUB KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128 +:VPCMPUB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpub_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpub_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111275 -:VPCMPUB KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256 +:VPCMPUB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpub_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpub_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } @@ -10555,18 +10555,18 @@ define pcodeop vpcmpub_avx512bw ; # VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111422 define pcodeop vpcmpd_avx512vl ; -:VPCMPD KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x1F; KReg_reg ... & XmmReg2_m128_m32bcst; imm8 +:VPCMPD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x1F; KReg_reg ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); + local tmp = vpcmpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111426 -:VPCMPD KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x1F; KReg_reg ... 
& YmmReg2_m256_m32bcst; imm8 +:VPCMPD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x1F; KReg_reg ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); + local tmp = vpcmpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } @@ -10581,18 +10581,18 @@ define pcodeop vpcmpd_avx512f ; # VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111434 define pcodeop vpcmpud_avx512vl ; -:VPCMPUD KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x1E; KReg_reg ... & XmmReg2_m128_m32bcst; imm8 +:VPCMPUD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x1E; KReg_reg ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpud_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); + local tmp = vpcmpud_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111438 -:VPCMPUD KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x1E; KReg_reg ... & YmmReg2_m256_m32bcst; imm8 +:VPCMPUD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x1E; KReg_reg ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpud_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); + local tmp = vpcmpud_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } @@ -10607,18 +10607,18 @@ define pcodeop vpcmpud_avx512f ; # VPCMPQ/VPCMPUQ 5-345 PAGE 2169 LINE 111573 define pcodeop vpcmpq_avx512vl ; -:VPCMPQ KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x1F; KReg_reg ... & XmmReg2_m128_m64bcst; imm8 +:VPCMPQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x1F; KReg_reg ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); + local tmp = vpcmpq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # VPCMPQ/VPCMPUQ 5-345 PAGE 2169 LINE 111577 -:VPCMPQ KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x1F; KReg_reg ... & YmmReg2_m256_m64bcst; imm8 +:VPCMPQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x1F; KReg_reg ... 
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); + local tmp = vpcmpq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } @@ -10633,18 +10633,18 @@ define pcodeop vpcmpq_avx512f ; # VPCMPQ/VPCMPUQ 5-345 PAGE 2169 LINE 111585 define pcodeop vpcmpuq_avx512vl ; -:VPCMPUQ KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x1E; KReg_reg ... & XmmReg2_m128_m64bcst; imm8 +:VPCMPUQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x1E; KReg_reg ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpuq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); + local tmp = vpcmpuq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # VPCMPQ/VPCMPUQ 5-345 PAGE 2169 LINE 111589 -:VPCMPUQ KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x1E; KReg_reg ... & YmmReg2_m256_m64bcst; imm8 +:VPCMPUQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x1E; KReg_reg ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vpcmpuq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); + local tmp = vpcmpuq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } @@ -10659,18 +10659,18 @@ define pcodeop vpcmpuq_avx512f ; # VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111724 define pcodeop vpcmpw_avx512vl ; -:VPCMPW KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128 +:VPCMPW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111728 -:VPCMPW KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x3F; KReg_reg ... & YmmReg2_m256 +:VPCMPW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } @@ -10685,18 +10685,18 @@ define pcodeop vpcmpw_avx512bw ; # VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111736 define pcodeop vpcmpuw_avx512vl ; -:VPCMPUW KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128 +:VPCMPUW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpuw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vpcmpuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111740 -:VPCMPUW KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256 +:VPCMPUW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vpcmpuw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vpcmpuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } @@ -10875,10 +10875,10 @@ define pcodeop vpconflictq_avx512cd ; # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112407 define pcodeop vpermd_avx512vl ; -:VPERMD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x36; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPERMD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x36; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpermd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpermd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -10897,20 +10897,20 @@ define pcodeop vpermd_avx512f ; # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112413 define pcodeop vpermw_avx512vl ; -:VPERMW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x8D; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPERMW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x8D; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpermw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpermw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112417 -:VPERMW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x8D; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPERMW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x8D; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpermw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpermw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -10929,20 +10929,20 @@ define pcodeop vpermw_avx512bw ; # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112553 define pcodeop vpermi2w_avx512vl ; -:VPERMI2W XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x75; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPERMI2W XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x75; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpermi2w_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpermi2w_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112556 -:VPERMI2W YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x75; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPERMI2W YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x75; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpermi2w_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpermi2w_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -10961,20 +10961,20 @@ define pcodeop vpermi2w_avx512bw ; # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112562 define pcodeop vpermi2d_avx512vl ; -:VPERMI2D XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x76; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPERMI2D XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x76; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpermi2d_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpermi2d_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112566 -:VPERMI2D YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x76; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPERMI2D YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x76; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpermi2d_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpermi2d_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -10993,20 +10993,20 @@ define pcodeop vpermi2d_avx512f ; # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112574 define pcodeop vpermi2q_avx512vl ; -:VPERMI2Q XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x76; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPERMI2Q XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x76; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpermi2q_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpermi2q_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112578 -:VPERMI2Q YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x76; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPERMI2Q YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x76; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpermi2q_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpermi2q_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -11025,20 +11025,20 @@ define pcodeop vpermi2q_avx512f ; # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112586 define pcodeop vpermi2ps_avx512vl ; -:VPERMI2PS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x77; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPERMI2PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x77; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpermi2ps_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpermi2ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112590 -:VPERMI2PS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x77; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPERMI2PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x77; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpermi2ps_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpermi2ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -11057,20 +11057,20 @@ define pcodeop vpermi2ps_avx512f ; # VPERMI2W/D/Q/PS/PD 5-366 PAGE 2190 LINE 112610 define pcodeop vpermi2pd_avx512vl ; -:VPERMI2PD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x77; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPERMI2PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x77; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpermi2pd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpermi2pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-366 PAGE 2190 LINE 112614 -:VPERMI2PD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x77; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPERMI2PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x77; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpermi2pd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpermi2pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -11089,20 +11089,20 @@ define pcodeop vpermi2pd_avx512f ; # VPERMILPD 5-371 PAGE 2195 LINE 112866 define pcodeop vpermilpd_avx512vl ; -:VPERMILPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x0D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPERMILPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x0D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - XmmResult = vpermilpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpermilpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPERMILPD 5-371 PAGE 2195 LINE 112869 -:VPERMILPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x0D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPERMILPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x0D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vpermilpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpermilpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -11151,20 +11151,20 @@ define pcodeop vpermilpd_avx512f ; # VPERMILPS 5-376 PAGE 2200 LINE 113170 define pcodeop vpermilps_avx512vl ; -:VPERMILPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x0C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPERMILPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x0C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - XmmResult = vpermilps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpermilps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPERMILPS 5-376 PAGE 2200 LINE 113173 -:VPERMILPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x0C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPERMILPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x0C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vpermilps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpermilps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -11234,10 +11234,10 @@ define pcodeop vpermpd_avx512f ; } # VPERMPD 5-381 PAGE 2205 LINE 113462 -:VPERMPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x16; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPERMPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x16; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
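# Editor's note (summary of the cited SDM pages, not patch text): the
# VPERMILPD/VPERMILPS forms above permute only within each 128-bit lane,
# using control bits from the evexV5 source, while VPERMPD/VPERMPS below
# permute across the whole vector. A C-like sketch of one VPERMILPD lane:
#   dst[i] = src[(i & ~1) + ((ctl[i] >> 1) & 1)];  // selector is bit 1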
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vpermpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpermpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -11255,10 +11255,10 @@ define pcodeop vpermpd_avx512f ; # VPERMPS 5-384 PAGE 2208 LINE 113636 define pcodeop vpermps_avx512vl ; -:VPERMPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x16; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPERMPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x16; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpermps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpermps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -11298,10 +11298,10 @@ define pcodeop vpermq_avx512f ; } # VPERMQ 5-387 PAGE 2211 LINE 113777 -:VPERMQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x36; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPERMQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x36; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vpermq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpermq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -11624,10 +11624,10 @@ define pcodeop vpmovq2m_avx512dq ; # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115959 define pcodeop vprolvd_avx512vl ; -:VPROLVD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPROLVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - XmmResult = vprolvd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vprolvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -11635,21 +11635,21 @@ define pcodeop vprolvd_avx512vl ; # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115962 define pcodeop vprold_avx512vl ; -:VPROLD vexVVVV_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=1 ... & XmmReg2_m128_m32bcst; imm8 +:VPROLD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=1 ... 
& XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { XmmResult = vprold_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask32; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115965 define pcodeop vprolvq_avx512vl ; -:VPROLVQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPROLVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - XmmResult = vprolvq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vprolvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -11657,53 +11657,53 @@ define pcodeop vprolvq_avx512vl ; # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115968 define pcodeop vprolq_avx512vl ; -:VPROLQ vexVVVV_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=1 ... & XmmReg2_m128_m64bcst; imm8 +:VPROLQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=1 ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { XmmResult = vprolq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask64; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115971 -:VPROLVD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPROLVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vprolvd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vprolvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115974 -:VPROLD vexVVVV_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=1 ... & YmmReg2_m256_m32bcst; imm8 +:VPROLD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=1 ... 
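# Illustrative sketch (editor's addition, C-like, not patch text): the
# VPROL*/VPROR* group rotates each lane left or right; the immediate forms
# (reg_opcode=1 here, reg_opcode=0 in the VPROR* hunks below) use imm8 for
# every lane, and the V-suffixed forms take per-lane counts from the second
# source. One 32-bit VPROLD lane:
#   unsigned n = imm8 & 31;
#   dst = (src << n) | (src >> ((32 - n) & 31));  // rotate left, count mod 32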
& YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { YmmResult = vprold_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask32; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115977 -:VPROLVQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPROLVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vprolvq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vprolvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115980 -:VPROLQ vexVVVV_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=1 ... & YmmReg2_m256_m64bcst; imm8 +:VPROLQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=1 ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { YmmResult = vprolq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask64; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115983 @@ -11752,10 +11752,10 @@ define pcodeop vprolq_avx512f ; # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116190 define pcodeop vprorvd_avx512vl ; -:VPRORVD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPRORVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - XmmResult = vprorvd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vprorvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -11763,21 +11763,21 @@ define pcodeop vprorvd_avx512vl ; # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116194 define pcodeop vprord_avx512vl ; -:VPRORD vexVVVV_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=0 ... & XmmReg2_m128_m32bcst; imm8 +:VPRORD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=0 ... 
& XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { XmmResult = vprord_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask32; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116197 define pcodeop vprorvq_avx512vl ; -:VPRORVQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPRORVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - XmmResult = vprorvq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vprorvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -11785,53 +11785,53 @@ define pcodeop vprorvq_avx512vl ; # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116200 define pcodeop vprorq_avx512vl ; -:VPRORQ vexVVVV_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_XmmReg & vexVVVV_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=0 ... & XmmReg2_m128_m64bcst; imm8 +:VPRORQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=0 ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { XmmResult = vprorq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); - XmmMask = vexVVVV_XmmReg; + XmmMask = evexV5_XmmReg; build XmmOpMask64; - vexVVVV_ZmmReg = zext(XmmResult); + evexV5_ZmmReg = zext(XmmResult); } # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116203 -:VPRORVD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPRORVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vprorvd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vprorvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116207 -:VPRORD vexVVVV_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=0 ... & YmmReg2_m256_m32bcst; imm8 +:VPRORD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=0 ... 
& YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { YmmResult = vprord_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask32; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116210 -:VPRORVQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPRORVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM) { - YmmResult = vprorvq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vprorvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116213 -:VPRORQ vexVVVV_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (vexVVVV_YmmReg & vexVVVV_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=0 ... & YmmReg2_m256_m64bcst; imm8 +:VPRORQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=0 ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI) { YmmResult = vprorq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); - YmmMask = vexVVVV_YmmReg; + YmmMask = evexV5_YmmReg; build YmmOpMask64; - vexVVVV_ZmmReg = zext(YmmResult); + evexV5_ZmmReg = zext(YmmResult); } # PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116216 @@ -11993,20 +11993,20 @@ define pcodeop vpscatterqq_avx512f ; # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116632 define pcodeop vpsllvw_avx512vl ; -:VPSLLVW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPSLLVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpsllvw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsllvw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116635 -:VPSLLVW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x12; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPSLLVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x12; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpsllvw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpsllvw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -12025,20 +12025,20 @@ define pcodeop vpsllvw_avx512bw ; # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116641 define pcodeop vpsllvd_avx512vl ; -:VPSLLVD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPSLLVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpsllvd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpsllvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116644 -:VPSLLVD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPSLLVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpsllvd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpsllvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -12057,20 +12057,20 @@ define pcodeop vpsllvd_avx512f ; # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116650 define pcodeop vpsllvq_avx512vl ; -:VPSLLVQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPSLLVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpsllvq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpsllvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116653 -:VPSLLVQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPSLLVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
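# Editor's sketch (C-like, not patch text): the VPSLLV* forms shift each
# lane left by an independent count from the second source, and any count
# at or above the lane width zeroes the lane. One 32-bit VPSLLVD lane:
#   dst = (cnt < 32) ? (src << cnt) : 0;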
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpsllvq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpsllvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -12089,20 +12089,20 @@ define pcodeop vpsllvq_avx512f ; # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116880 define pcodeop vpsravw_avx512vl ; -:VPSRAVW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x11; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPSRAVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x11; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpsravw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsravw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116883 -:VPSRAVW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x11; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPSRAVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x11; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpsravw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpsravw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -12121,20 +12121,20 @@ define pcodeop vpsravw_avx512bw ; # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116889 define pcodeop vpsravd_avx512vl ; -:VPSRAVD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x46; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPSRAVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x46; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpsravd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpsravd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116893 -:VPSRAVD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPSRAVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpsravd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpsravd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -12153,20 +12153,20 @@ define pcodeop vpsravd_avx512f ; # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116901 define pcodeop vpsravq_avx512vl ; -:VPSRAVQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x46; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPSRAVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x46; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpsravq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpsravq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116905 -:VPSRAVQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPSRAVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpsravq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpsravq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -12185,20 +12185,20 @@ define pcodeop vpsravq_avx512f ; # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117151 define pcodeop vpsrlvw_avx512vl ; -:VPSRLVW XmmReg1^XmmOpMask16, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 +:VPSRLVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - XmmResult = vpsrlvw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + XmmResult = vpsrlvw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117154 -:VPSRLVW YmmReg1^YmmOpMask16, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x10; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 +:VPSRLVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x10; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
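# Editor's sketch (C-like, not patch text): VPSRAV* is the arithmetic right
# shift, so an oversized count floods the lane with copies of the sign bit,
# while the logical VPSRLV* forms here zero it. One 32-bit lane of each:
#   dst = (int32_t)src >> ((cnt < 32) ? cnt : 31);  // VPSRAVD
#   dst = (cnt < 32) ? (src >> cnt) : 0;            // VPSRLVD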
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - YmmResult = vpsrlvw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + YmmResult = vpsrlvw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); @@ -12217,20 +12217,20 @@ define pcodeop vpsrlvw_avx512bw ; # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117160 define pcodeop vpsrlvd_avx512vl ; -:VPSRLVD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VPSRLVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpsrlvd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vpsrlvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117163 -:VPSRLVD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VPSRLVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpsrlvd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vpsrlvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -12249,20 +12249,20 @@ define pcodeop vpsrlvd_avx512f ; # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117169 define pcodeop vpsrlvq_avx512vl ; -:VPSRLVQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VPSRLVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpsrlvq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vpsrlvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117172 -:VPSRLVQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VPSRLVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpsrlvq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vpsrlvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -12281,20 +12281,20 @@ define pcodeop vpsrlvq_avx512f ; # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117395 define pcodeop vpternlogd_avx512vl ; -:VPTERNLOGD XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x25; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8 +:VPTERNLOGD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x25; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpternlogd_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); + XmmResult = vpternlogd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117400 -:VPTERNLOGD YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x25; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8 +:VPTERNLOGD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x25; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpternlogd_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); + YmmResult = vpternlogd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -12313,20 +12313,20 @@ define pcodeop vpternlogd_avx512f ; # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117410 define pcodeop vpternlogq_avx512vl ; -:VPTERNLOGQ XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x25; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8 +:VPTERNLOGQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x25; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vpternlogq_avx512vl( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); + XmmResult = vpternlogq_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117415 -:VPTERNLOGQ YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x25; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8 +:VPTERNLOGQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x25; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
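# Illustrative sketch (editor's addition, not patch text): VPTERNLOGD/Q use
# imm8 as a 3-input truth table evaluated bitwise over dest, the evexV5
# register and the r/m operand. For each bit position:
#   idx = (a_bit << 2) | (b_bit << 1) | c_bit;
#   r_bit = (imm8 >> idx) & 1;
# e.g. imm8=0x96 gives a three-way XOR and imm8=0xE8 a bitwise majority.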
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vpternlogq_avx512vl( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); + YmmResult = vpternlogq_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -12345,18 +12345,18 @@ define pcodeop vpternlogq_avx512f ; # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117559 define pcodeop vptestmb_avx512vl ; -:VPTESTMB KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 +:VPTESTMB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestmb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vptestmb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117562 -:VPTESTMB KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 +:VPTESTMB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestmb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vptestmb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } @@ -12371,18 +12371,18 @@ define pcodeop vptestmb_avx512bw ; # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117568 define pcodeop vptestmw_avx512vl ; -:VPTESTMW KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 +:VPTESTMW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestmw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vptestmw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117571 -:VPTESTMW KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 +:VPTESTMW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestmw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vptestmw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } @@ -12397,18 +12397,18 @@ define pcodeop vptestmw_avx512bw ; # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117577 define pcodeop vptestmd_avx512vl ; -:VPTESTMD KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m32bcst +:VPTESTMD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestmd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + local tmp = vptestmd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117581 -:VPTESTMD KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m32bcst +:VPTESTMD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestmd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + local tmp = vptestmd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } @@ -12423,18 +12423,18 @@ define pcodeop vptestmd_avx512f ; # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117589 define pcodeop vptestmq_avx512vl ; -:VPTESTMQ KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m64bcst +:VPTESTMQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestmq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + local tmp = vptestmq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117593 -:VPTESTMQ KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m64bcst +:VPTESTMQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestmq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + local tmp = vptestmq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } @@ -12449,18 +12449,18 @@ define pcodeop vptestmq_avx512f ; # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117717 define pcodeop vptestnmb_avx512vl ; -:VPTESTNMB KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 +:VPTESTNMB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestnmb_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vptestnmb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117721 -:VPTESTNMB KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 +:VPTESTNMB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestnmb_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vptestnmb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } @@ -12475,18 +12475,18 @@ define pcodeop vptestnmb_avx512f ; # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117729 define pcodeop vptestnmw_avx512vl ; -:VPTESTNMW KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 +:VPTESTNMW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestnmw_avx512vl( vexVVVV_XmmReg, XmmReg2_m128 ); + local tmp = vptestnmw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117733 -:VPTESTNMW KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 +:VPTESTNMW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { - local tmp = vptestnmw_avx512vl( vexVVVV_YmmReg, YmmReg2_m256 ); + local tmp = vptestnmw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } @@ -12501,18 +12501,18 @@ define pcodeop vptestnmw_avx512f ; # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117741 define pcodeop vptestnmd_avx512vl ; -:VPTESTNMD KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_XmmReg; byte=0x27; KReg_reg ... 
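# Editor's note on the idiom above (not patch text): VPTESTM* sets mask bit
# i when (src1[i] & src2[i]) != 0, and the VPTESTNM* forms here set it when
# that AND is zero; the zext(AVXOpMask[0,N]) & tmp expression keeps only the
# lanes enabled by the write-mask. Per lane, roughly:
#   k[i] = mask[i] & ((a[i] & b[i]) != 0);  // VPTESTM; VPTESTNM tests == 0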
& XmmReg2_m128_m32bcst +:VPTESTNMD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestnmd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + local tmp = vptestnmd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117745 -:VPTESTNMD KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & vexVVVV_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m32bcst +:VPTESTNMD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestnmd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + local tmp = vptestnmd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } @@ -12527,18 +12527,18 @@ define pcodeop vptestnmd_avx512f ; # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117753 define pcodeop vptestnmq_avx512vl ; -:VPTESTNMQ KReg_reg AVXOpMask, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m64bcst +:VPTESTNMQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestnmq_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + local tmp = vptestnmq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117757 -:VPTESTNMQ KReg_reg AVXOpMask, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & vexVVVV_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m64bcst +:VPTESTNMQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - local tmp = vptestnmq_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + local tmp = vptestnmq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } @@ -12553,20 +12553,20 @@ define pcodeop vptestnmq_avx512f ; # VRANGEPD 5-470 PAGE 2294 LINE 117905 define pcodeop vrangepd_avx512vl ; -:VRANGEPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8 +:VRANGEPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vrangepd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); + XmmResult = vrangepd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VRANGEPD 5-470 PAGE 2294 LINE 117910 -:VRANGEPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8 +:VRANGEPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vrangepd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); + YmmResult = vrangepd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -12585,20 +12585,20 @@ define pcodeop vrangepd_avx512dq ; # VRANGEPS 5-475 PAGE 2299 LINE 118139 define pcodeop vrangeps_avx512vl ; -:VRANGEPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8 +:VRANGEPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vrangeps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); + XmmResult = vrangeps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VRANGEPS 5-475 PAGE 2299 LINE 118144 -:VRANGEPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8 +:VRANGEPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vrangeps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); + YmmResult = vrangeps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -12617,10 +12617,10 @@ define pcodeop vrangeps_avx512dq ; # VRANGESD 5-479 PAGE 2303 LINE 118318 define pcodeop vrangesd_avx512dq ; -:VRANGESD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8 +:VRANGESD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
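# Editor's reading of the cited SDM pages (not patch text): VRANGE* picks
# the operation with imm8[1:0] (00 min, 01 max, 10 min-abs, 11 max-abs) and
# applies the imm8[3:2] sign control to the result, e.g. with hypothetical
# helpers:
#   r = range_op(a, b, imm8 & 3);
#   r = apply_sign_ctl(r, a, (imm8 >> 2) & 3);  // 00 keeps the sign of a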
& XmmReg2_m64; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrangesd_avx512dq( vexVVVV_XmmReg, XmmReg2_m64, imm8:1 ); + XmmResult = vrangesd_avx512dq( evexV5_XmmReg, XmmReg2_m64, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -12628,10 +12628,10 @@ define pcodeop vrangesd_avx512dq ; # VRANGESS 5-482 PAGE 2306 LINE 118473 define pcodeop vrangess_avx512dq ; -:VRANGESS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VRANGESS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrangess_avx512dq( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vrangess_avx512dq( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -12671,10 +12671,10 @@ define pcodeop vrcp14pd_avx512f ; # VRCP14SD 5-487 PAGE 2311 LINE 118726 define pcodeop vrcp14sd_avx512f ; -:VRCP14SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VRCP14SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrcp14sd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vrcp14sd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -12714,10 +12714,10 @@ define pcodeop vrcp14ps_avx512f ; # VRCP14SS 5-491 PAGE 2315 LINE 118904 define pcodeop vrcp14ss_avx512f ; -:VRCP14SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VRCP14SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrcp14ss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vrcp14ss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -12736,10 +12736,10 @@ define pcodeop vrcp28pd_avx512er ; # VRCP28SD 5-495 PAGE 2319 LINE 119074 define pcodeop vrcp28sd_avx512er ; -:VRCP28SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xCB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VRCP28SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xCB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
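# Editor's note (SDM accuracy figures, not patch text): the VRCP14*/VRSQRT14*
# approximations bound the relative error by 2^-14, while the AVX-512ER
# VRCP28*/VRSQRT28* forms in this region tighten that bound to 2^-28.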
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrcp28sd_avx512er( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vrcp28sd_avx512er( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -12758,10 +12758,10 @@ define pcodeop vrcp28ps_avx512er ; # VRCP28SS 5-499 PAGE 2323 LINE 119263 define pcodeop vrcp28ss_avx512er ; -:VRCP28SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xCB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VRCP28SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xCB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrcp28ss_avx512er( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vrcp28ss_avx512er( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -12801,10 +12801,10 @@ define pcodeop vreducepd_avx512dq ; # VREDUCESD 5-504 PAGE 2328 LINE 119510 define pcodeop vreducesd_avx512dq ; -:VREDUCESD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VREDUCESD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vreducesd_avx512dq( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vreducesd_avx512dq( evexV5_XmmReg, XmmReg2_m64, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -12844,10 +12844,10 @@ define pcodeop vreduceps_avx512dq ; # VREDUCESS 5-508 PAGE 2332 LINE 119719 define pcodeop vreducess_avx512dq ; -:VREDUCESS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VREDUCESS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vreducess_avx512dq( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vreducess_avx512dq( evexV5_XmmReg, XmmReg2_m32, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -12887,10 +12887,10 @@ define pcodeop vrndscalepd_avx512f ; # VRNDSCALESD 5-514 PAGE 2338 LINE 119998 define pcodeop vrndscalesd_avx512f ; -:VRNDSCALESD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8 +:VRNDSCALESD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ...
& XmmReg2_m64; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrndscalesd_avx512f( vexVVVV_XmmReg, XmmReg2_m64, imm8:1 ); + XmmResult = vrndscalesd_avx512f( evexV5_XmmReg, XmmReg2_m64, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -12930,10 +12930,10 @@ define pcodeop vrndscaleps_avx512f ; # VRNDSCALESS 5-519 PAGE 2343 LINE 120263 define pcodeop vrndscaless_avx512f ; -:VRNDSCALESS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x0A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VRNDSCALESS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x0A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrndscaless_avx512f( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vrndscaless_avx512f( evexV5_XmmReg, XmmReg2_m32, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -12973,10 +12973,10 @@ define pcodeop vrsqrt14pd_avx512f ; # VRSQRT14SD 5-523 PAGE 2347 LINE 120491 define pcodeop vrsqrt14sd_avx512f ; -:VRSQRT14SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VRSQRT14SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrsqrt14sd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vrsqrt14sd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -13016,10 +13016,10 @@ define pcodeop vrsqrt14ps_avx512f ; # VRSQRT14SS 5-527 PAGE 2351 LINE 120690 define pcodeop vrsqrt14ss_avx512f ; -:VRSQRT14SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VRSQRT14SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrsqrt14ss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vrsqrt14ss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -13038,10 +13038,10 @@ define pcodeop vrsqrt28pd_avx512er ; # VRSQRT28SD 5-531 PAGE 2355 LINE 120869 define pcodeop vrsqrt28sd_avx512er ; -:VRSQRT28SD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xCD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VRSQRT28SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xCD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ...
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrsqrt28sd_avx512er( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vrsqrt28sd_avx512er( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -13060,10 +13060,10 @@ define pcodeop vrsqrt28ps_avx512er ; # VRSQRT28SS 5-535 PAGE 2359 LINE 121051 define pcodeop vrsqrt28ss_avx512er ; -:VRSQRT28SS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xCD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VRSQRT28SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xCD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vrsqrt28ss_avx512er( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vrsqrt28ss_avx512er( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -13071,20 +13071,20 @@ define pcodeop vrsqrt28ss_avx512er ; # VSCALEFPD 5-537 PAGE 2361 LINE 121140 define pcodeop vscalefpd_avx512vl ; -:VSCALEFPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst +:VSCALEFPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vscalefpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vscalefpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VSCALEFPD 5-537 PAGE 2361 LINE 121143 -:VSCALEFPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VSCALEFPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vscalefpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vscalefpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -13103,10 +13103,10 @@ define pcodeop vscalefpd_avx512f ; # VSCALEFSD 5-540 PAGE 2364 LINE 121269 define pcodeop vscalefsd_avx512f ; -:VSCALEFSD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 +:VSCALEFSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vscalefsd_avx512f( vexVVVV_XmmReg, XmmReg2_m64 ); + XmmResult = vscalefsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); @@ -13114,20 +13114,20 @@ define pcodeop vscalefsd_avx512f ; # VSCALEFPS 5-542 PAGE 2366 LINE 121355 define pcodeop vscalefps_avx512vl ; -:VSCALEFPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VSCALEFPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vscalefps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vscalefps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VSCALEFPS 5-542 PAGE 2366 LINE 121358 -:VSCALEFPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VSCALEFPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vscalefps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vscalefps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -13146,10 +13146,10 @@ define pcodeop vscalefps_avx512f ; # VSCALEFSS 5-544 PAGE 2368 LINE 121470 define pcodeop vscalefss_avx512f ; -:VSCALEFSS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 +:VSCALEFSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { - XmmResult = vscalefss_avx512f( vexVVVV_XmmReg, XmmReg2_m32 ); + XmmResult = vscalefss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); @@ -13345,10 +13345,10 @@ define pcodeop vscatterpf1qpd_avx512pf ; # VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 121994 define pcodeop vshuff32x4_avx512vl ; -:VSHUFF32X4 YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VSHUFF32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vshuff32x4_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vshuff32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -13367,10 +13367,10 @@ define pcodeop vshuff32x4_avx512f ; # VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122002 define pcodeop vshuff64x2_avx512vl ; -:VSHUFF64X2 YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VSHUFF64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vshuff64x2_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vshuff64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -13389,10 +13389,10 @@ define pcodeop vshuff64x2_avx512f ; # VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122010 define pcodeop vshufi32x4_avx512vl ; -:VSHUFI32X4 YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VSHUFI32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vshufi32x4_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vshufi32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -13411,10 +13411,10 @@ define pcodeop vshufi32x4_avx512f ; # VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122016 define pcodeop vshufi64x2_avx512vl ; -:VSHUFI64X2 YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VSHUFI64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vshufi64x2_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vshufi64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -13433,20 +13433,20 @@ define pcodeop vshufi64x2_avx512f ; # XORPD 5-596 PAGE 2420 LINE 123834 define pcodeop vxorpd_avx512vl ; -:VXORPD XmmReg1^XmmOpMask64, vexVVVV_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask64) ...
& XmmReg2_m128_m64bcst +:VXORPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vxorpd_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m64bcst ); + XmmResult = vxorpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # XORPD 5-596 PAGE 2420 LINE 123837 -:VXORPD YmmReg1^YmmOpMask64, vexVVVV_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst +:VXORPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vxorpd_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m64bcst ); + YmmResult = vxorpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); @@ -13465,20 +13465,20 @@ define pcodeop vxorpd_avx512dq ; # XORPS 5-599 PAGE 2423 LINE 123959 define pcodeop vxorps_avx512vl ; -:VXORPS XmmReg1^XmmOpMask32, vexVVVV_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst +:VXORPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - XmmResult = vxorps_avx512vl( vexVVVV_XmmReg, XmmReg2_m128_m32bcst ); + XmmResult = vxorps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # XORPS 5-599 PAGE 2423 LINE 123962 -:VXORPS YmmReg1^YmmOpMask32, vexVVVV_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst +:VXORPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { - YmmResult = vxorps_avx512vl( vexVVVV_YmmReg, YmmReg2_m256_m32bcst ); + YmmResult = vxorps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); @@ -13495,3 +13495,4061 @@ define pcodeop vxorps_avx512dq ; ZmmReg1 = ZmmResult; } +# VAES, GFNI, VPCLMULQDQ, AVX512-BF16, and AVX512-FP16 additions + +# AESDEC 3-51 PAGE 621 LINE 35875 +define pcodeop vaesdec_vaes ; +:VAESDEC XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDE; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:16 = vaesdec_vaes( evexV5_XmmReg, XmmReg2_m128 ); + ZmmReg1 = zext(tmp); +} + +# AESDEC 3-51 PAGE 621 LINE 35879 +:VAESDEC YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:32 = vaesdec_vaes( evexV5_YmmReg, YmmReg2_m256 ); + ZmmReg1 = zext(tmp); +} + +# AESDEC 3-51 PAGE 621 LINE 35883 +:VAESDEC ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDE; ZmmReg1 ... & ZmmReg2_m512 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + ZmmReg1 = vaesdec_vaes( evexV5_ZmmReg, ZmmReg2_m512 ); +} + +# AESDECLAST 3-57 PAGE 627 LINE 36144 +define pcodeop vaesdeclast_vaes ; +:VAESDECLAST XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:16 = vaesdeclast_vaes( evexV5_XmmReg, XmmReg2_m128 ); + ZmmReg1 = zext(tmp); +} + +# AESDECLAST 3-57 PAGE 627 LINE 36148 +:VAESDECLAST YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:32 = vaesdeclast_vaes( evexV5_YmmReg, YmmReg2_m256 ); + ZmmReg1 = zext(tmp); +} + +# AESDECLAST 3-57 PAGE 627 LINE 36152 +:VAESDECLAST ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDF; ZmmReg1 ... & ZmmReg2_m512 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + ZmmReg1 = vaesdeclast_vaes( evexV5_ZmmReg, ZmmReg2_m512 ); +} + +# AESENC 3-63 PAGE 633 LINE 36420 +define pcodeop vaesenc_vaes ; +:VAESENC XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:16 = vaesenc_vaes( evexV5_XmmReg, XmmReg2_m128 ); + ZmmReg1 = zext(tmp); +} + +# AESENC 3-63 PAGE 633 LINE 36423 +:VAESENC YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:32 = vaesenc_vaes( evexV5_YmmReg, YmmReg2_m256 ); + ZmmReg1 = zext(tmp); +} + +# AESENC 3-63 PAGE 633 LINE 36426 +:VAESENC ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDC; ZmmReg1 ... & ZmmReg2_m512 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + ZmmReg1 = vaesenc_vaes( evexV5_ZmmReg, ZmmReg2_m512 ); +} + +# AESENCLAST 3-69 PAGE 639 LINE 36687 +define pcodeop vaesenclast_vaes ; +:VAESENCLAST XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDD; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:16 = vaesenclast_vaes( evexV5_XmmReg, XmmReg2_m128 ); + ZmmReg1 = zext(tmp); +} + +# AESENCLAST 3-69 PAGE 639 LINE 36691 +:VAESENCLAST YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDD; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:32 = vaesenclast_vaes( evexV5_YmmReg, YmmReg2_m256 ); + ZmmReg1 = zext(tmp); +} + +# AESENCLAST 3-69 PAGE 639 LINE 36695 +:VAESENCLAST ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDD; ZmmReg1 ... & ZmmReg2_m512 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + ZmmReg1 = vaesenclast_vaes( evexV5_ZmmReg, ZmmReg2_m512 ); +} + +# GF2P8AFFINEINVQB 3-476 PAGE 1046 LINE 56498 +define pcodeop vgf2p8affineinvqb_avx512vl ; +:VGF2P8AFFINEINVQB XmmReg1 XmmOpMask8, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask8 & evexV5_XmmReg; byte=0xCF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst; imm8 +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vgf2p8affineinvqb_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); + XmmMask = XmmReg1; + build XmmOpMask8; + ZmmReg1 = zext(XmmResult); +} + +# GF2P8AFFINEINVQB 3-476 PAGE 1046 LINE 56501 +:VGF2P8AFFINEINVQB YmmReg1 YmmOpMask8, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask8 & evexV5_YmmReg; byte=0xCF; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst; imm8 +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vgf2p8affineinvqb_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); + YmmMask = YmmReg1; + build YmmOpMask8; + ZmmReg1 = zext(YmmResult); +} + +# GF2P8AFFINEINVQB 3-476 PAGE 1046 LINE 56504 +define pcodeop vgf2p8affineinvqb_avx512f ; +:VGF2P8AFFINEINVQB ZmmReg1 ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask8 & evexV5_ZmmReg; byte=0xCF; ZmmReg1 ... & ZmmReg2_m512_m64bcst; imm8 +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + ZmmResult = vgf2p8affineinvqb_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); + ZmmMask = ZmmReg1; + build ZmmOpMask8; + ZmmReg1 = ZmmResult; +} + +# GF2P8AFFINEQB 3-479 PAGE 1049 LINE 56642 +define pcodeop vgf2p8affineqb_avx512vl ; +:VGF2P8AFFINEQB XmmReg1 XmmOpMask8, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask8 & evexV5_XmmReg; byte=0xCE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst; imm8 +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vgf2p8affineqb_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); + XmmMask = XmmReg1; + build XmmOpMask8; + ZmmReg1 = zext(XmmResult); +} + +# GF2P8AFFINEQB 3-479 PAGE 1049 LINE 56645 +:VGF2P8AFFINEQB YmmReg1 YmmOpMask8, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask8 & evexV5_YmmReg; byte=0xCE; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256_m64bcst; imm8 +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vgf2p8affineqb_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); + YmmMask = YmmReg1; + build YmmOpMask8; + ZmmReg1 = zext(YmmResult); +} + +# GF2P8AFFINEQB 3-479 PAGE 1049 LINE 56648 +define pcodeop vgf2p8affineqb_avx512f ; +:VGF2P8AFFINEQB ZmmReg1 ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask8 & evexV5_ZmmReg; byte=0xCE; ZmmReg1 ... & ZmmReg2_m512_m64bcst; imm8 +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + ZmmResult = vgf2p8affineqb_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); + ZmmMask = ZmmReg1; + build ZmmOpMask8; + ZmmReg1 = ZmmResult; +} + +# GF2P8MULB 3-481 PAGE 1051 LINE 56754 +define pcodeop vgf2p8mulb_avx512vl ; +:VGF2P8MULB XmmReg1 XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0xCF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + XmmResult = vgf2p8mulb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); + XmmMask = XmmReg1; + build XmmOpMask8; + ZmmReg1 = zext(XmmResult); +} + +# GF2P8MULB 3-481 PAGE 1051 LINE 56757 +:VGF2P8MULB YmmReg1 YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0xCF; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + YmmResult = vgf2p8mulb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); + YmmMask = YmmReg1; + build YmmOpMask8; + ZmmReg1 = zext(YmmResult); +} + +# GF2P8MULB 3-481 PAGE 1051 LINE 56760 +define pcodeop vgf2p8mulb_avx512f ; +:VGF2P8MULB ZmmReg1 ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0xCF; ZmmReg1 ... & ZmmReg2_m512 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + ZmmResult = vgf2p8mulb_avx512f( evexV5_ZmmReg, ZmmReg2_m512 ); + ZmmMask = ZmmReg1; + build ZmmOpMask8; + ZmmReg1 = ZmmResult; +} + +# PCLMULQDQ 4-242 PAGE 1362 LINE 76037 +define pcodeop vpclmulqdq_vpclmulqdq ; +:VPCLMULQDQ XmmReg1, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_XmmReg; byte=0x44; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:16 = vpclmulqdq_vpclmulqdq( evexV5_XmmReg, XmmReg2_m128, imm8:1 ); + ZmmReg1 = zext(tmp); +} + +# PCLMULQDQ 4-242 PAGE 1362 LINE 76042 +:VPCLMULQDQ YmmReg1, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_YmmReg; byte=0x44; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + local tmp:32 = vpclmulqdq_vpclmulqdq( evexV5_YmmReg, YmmReg2_m256, imm8:1 ); + ZmmReg1 = zext(tmp); +} + +# PCLMULQDQ 4-242 PAGE 1362 LINE 76047 +:VPCLMULQDQ ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x44; ZmmReg1 ... 
& ZmmReg2_m512; imm8 +[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) +{ + ZmmReg1 = vpclmulqdq_vpclmulqdq( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 ); +} + +# VADDPH 5-5 PAGE 1829 LINE 101735 +define pcodeop vaddph_avx512fp16 ; +:VADDPH XmmReg1 XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vaddph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VADDPH 5-5 PAGE 1829 LINE 101738 +:VADDPH YmmReg1 YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vaddph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask16; + ZmmReg1 = zext(YmmResult); +} + +# VADDPH 5-5 PAGE 1829 LINE 101741 +:VADDPH ZmmReg1 ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x58; ZmmReg1 ... & ZmmReg2_m512_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + ZmmResult = vaddph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask16; + ZmmReg1 = ZmmResult; +} + +# VADDSH 5-7 PAGE 1831 LINE 101824 +define pcodeop vaddsh_avx512fp16 ; +:VADDSH XmmReg1 XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m16 +{ + XmmResult = vaddsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 ); + XmmMask = XmmReg1; + build XmmOpMask; + conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); + XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged + ZmmReg1 = zext(XmmResult); +} + +VCMPPH_mon: "VCMPEQPH" is imm8=0x0 { } +VCMPPH_op: "" is imm8=0x0 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPLTPH" is imm8=0x1 { } +VCMPPH_op: "" is imm8=0x1 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPLEPH" is imm8=0x2 { } +VCMPPH_op: "" is imm8=0x2 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPUNORDPH" is imm8=0x3 { } +VCMPPH_op: "" is imm8=0x3 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNEQPH" is imm8=0x4 { } +VCMPPH_op: "" is imm8=0x4 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNLTPH" is imm8=0x5 { } +VCMPPH_op: "" is imm8=0x5 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNLEPH" is imm8=0x6 { } +VCMPPH_op: "" is imm8=0x6 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPORDPH" is imm8=0x7 { } +VCMPPH_op: "" is imm8=0x7 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPEQ_UQPH" is imm8=0x8 { } +VCMPPH_op: "" is imm8=0x8 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNGEPH" is imm8=0x9 { } +VCMPPH_op: "" is imm8=0x9 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNGTPH" is imm8=0xa { } +VCMPPH_op: "" is imm8=0xa & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPFALSEPH" is imm8=0xb { } +VCMPPH_op: "" is imm8=0xb & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNEQ_OQPH" is imm8=0xc { } +VCMPPH_op: "" is imm8=0xc & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPGEPH" is imm8=0xd { } +VCMPPH_op: "" is imm8=0xd & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPGTPH" is imm8=0xe { } +VCMPPH_op: "" is imm8=0xe & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPTRUEPH" is imm8=0xf { } +VCMPPH_op: "" is imm8=0xf & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPEQ_OSPH" is imm8=0x10 { } +VCMPPH_op: "" is imm8=0x10 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPLT_OQPH" is imm8=0x11 { } +VCMPPH_op: "" is imm8=0x11 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPLE_OQPH" is imm8=0x12 { } +VCMPPH_op: "" is imm8=0x12 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPUNORD_SPH" is imm8=0x13 { } +VCMPPH_op: "" is imm8=0x13 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNEQ_USPH" is imm8=0x14 { } +VCMPPH_op: "" is imm8=0x14 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNLT_UQPH" is imm8=0x15 { } +VCMPPH_op: "" is imm8=0x15 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNLE_UQPH" is imm8=0x16 { } +VCMPPH_op: "" is imm8=0x16 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPORD_SPH" is imm8=0x17 { } +VCMPPH_op: "" is imm8=0x17 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPEQ_USPH" is imm8=0x18 { } +VCMPPH_op: "" is imm8=0x18 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNGE_UQPH" is imm8=0x19 { } +VCMPPH_op: "" is imm8=0x19 & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNGT_UQPH" is imm8=0x1a { } +VCMPPH_op: "" is imm8=0x1a & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPFALSE_OSPH" is imm8=0x1b { } +VCMPPH_op: "" is imm8=0x1b & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPNEQ_OSPH" is imm8=0x1c { } +VCMPPH_op: "" is 
imm8=0x1c & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPGE_OQPH" is imm8=0x1d { } +VCMPPH_op: "" is imm8=0x1d & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPGT_OQPH" is imm8=0x1e { } +VCMPPH_op: "" is imm8=0x1e & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPTRUE_USPH" is imm8=0x1f { } +VCMPPH_op: "" is imm8=0x1f & imm8_val { export *[const]:1 imm8_val; } +VCMPPH_mon: "VCMPPH" is imm8 { } +VCMPPH_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } + +define pcodeop vcmpph_avx512fp16; +:^VCMPPH_mon KReg_reg^XmmOpMask, evexV5_XmmReg, XmmReg2_m128_m16bcst^VCMPPH_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m16bcst; VCMPPH_mon & VCMPPH_op +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + KReg_reg = vcmpph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst, XmmOpMask, VCMPPH_op ); +} + +# VCMPPH 5-21 PAGE 1845 LINE 102586 +:^VCMPPH_mon KReg_reg^YmmOpMask, evexV5_YmmReg, YmmReg2_m256_m16bcst^VCMPPH_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask & evexV5_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m16bcst; VCMPPH_mon & VCMPPH_op +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + + KReg_reg = vcmpph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst, YmmOpMask, VCMPPH_op ); +} + +# VCMPPH 5-21 PAGE 1845 LINE 102590 +:^VCMPPH_mon KReg_reg^ZmmOpMask, evexV5_ZmmReg, ZmmReg2_m512_m16bcst^VCMPPH_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask & evexV5_ZmmReg; byte=0xC2; KReg_reg ... & ZmmReg2_m512_m16bcst; VCMPPH_mon & VCMPPH_op +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + KReg_reg = vcmpph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst, ZmmOpMask, VCMPPH_op ); +} + +VCMPSH_mon: "VCMPEQSH" is imm8=0x0 { } +VCMPSH_op: "" is imm8=0x0 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPLTSH" is imm8=0x1 { } +VCMPSH_op: "" is imm8=0x1 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPLESH" is imm8=0x2 { } +VCMPSH_op: "" is imm8=0x2 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPUNORDSH" is imm8=0x3 { } +VCMPSH_op: "" is imm8=0x3 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNEQSH" is imm8=0x4 { } +VCMPSH_op: "" is imm8=0x4 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNLTSH" is imm8=0x5 { } +VCMPSH_op: "" is imm8=0x5 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNLESH" is imm8=0x6 { } +VCMPSH_op: "" is imm8=0x6 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPORDSH" is imm8=0x7 { } +VCMPSH_op: "" is imm8=0x7 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPEQ_UQSH" is imm8=0x8 { } +VCMPSH_op: "" is imm8=0x8 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNGESH" is imm8=0x9 { } +VCMPSH_op: "" is imm8=0x9 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNGTSH" is imm8=0xa { } +VCMPSH_op: "" is imm8=0xa & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPFALSESH" is imm8=0xb { } +VCMPSH_op: "" is imm8=0xb & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNEQ_OQSH" is imm8=0xc { } +VCMPSH_op: "" is imm8=0xc & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPGESH" is imm8=0xd { } +VCMPSH_op: "" is imm8=0xd & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPGTSH" is imm8=0xe { } +VCMPSH_op: "" is imm8=0xe & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPTRUESH" is imm8=0xf { } 
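+# Note: the VCMPPH/VCMPSH predicate tables are built from _mon/_op constructor
+# pairs. For each recognized imm8 encoding the _mon constructor supplies the
+# pseudo-op mnemonic, while the paired _op constructor suppresses the redundant
+# imm8 from the displayed operand list but still exports its value as a 1-byte
+# constant for the semantic section. Encodings without a dedicated mnemonic
+# (0x20 and above) fall through to the generic "VCMPPH/VCMPSH ..., imm8" form.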
+VCMPSH_op: "" is imm8=0xf & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPEQ_OSSH" is imm8=0x10 { } +VCMPSH_op: "" is imm8=0x10 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPLT_OQSH" is imm8=0x11 { } +VCMPSH_op: "" is imm8=0x11 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPLE_OQSH" is imm8=0x12 { } +VCMPSH_op: "" is imm8=0x12 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPUNORD_SSH" is imm8=0x13 { } +VCMPSH_op: "" is imm8=0x13 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNEQ_USSH" is imm8=0x14 { } +VCMPSH_op: "" is imm8=0x14 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNLT_UQSH" is imm8=0x15 { } +VCMPSH_op: "" is imm8=0x15 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNLE_UQSH" is imm8=0x16 { } +VCMPSH_op: "" is imm8=0x16 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPORD_SSH" is imm8=0x17 { } +VCMPSH_op: "" is imm8=0x17 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPEQ_USSH" is imm8=0x18 { } +VCMPSH_op: "" is imm8=0x18 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNGE_UQSH" is imm8=0x19 { } +VCMPSH_op: "" is imm8=0x19 & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNGT_UQSH" is imm8=0x1a { } +VCMPSH_op: "" is imm8=0x1a & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPFALSE_OSSH" is imm8=0x1b { } +VCMPSH_op: "" is imm8=0x1b & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPNEQ_OSSH" is imm8=0x1c { } +VCMPSH_op: "" is imm8=0x1c & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPGE_OQSH" is imm8=0x1d { } +VCMPSH_op: "" is imm8=0x1d & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPGT_OQSH" is imm8=0x1e { } +VCMPSH_op: "" is imm8=0x1e & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPTRUE_USSH" is imm8=0x1f { } +VCMPSH_op: "" is imm8=0x1f & imm8_val { export *[const]:1 imm8_val; } +VCMPSH_mon: "VCMPSH" is imm8 { } +VCMPSH_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } + +# VCMPSH 5-23 PAGE 1847 LINE 102692 +define pcodeop vcmpsh_avx512fp16 ; +:^VCMPSH_mon KReg_reg^XmmOpMask, evexV5_XmmReg, XmmReg2_m16^VCMPSH_op is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m16; VCMPSH_mon & VCMPSH_op +{ + KReg_reg = vcmpsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, XmmOpMask, VCMPSH_op ); +} + +# VCOMISH 5-25 PAGE 1849 LINE 102783 +define pcodeop vcomish_avx512fp16 ; +:VCOMISH XmmReg1, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0); byte=0x2F; XmmReg1 ... & XmmReg2_m16 +{ + vcomish_avx512fp16( XmmReg1, XmmReg2_m16 ); + # TODO missing destination or side effects +} + +# VCVTDQ2PH 5-31 PAGE 1855 LINE 103058 +define pcodeop vcvtdq2ph_avx512fp16 ; +:VCVTDQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtdq2ph_avx512fp16( XmmReg2_m128_m32bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult[0,64]); +} + +# VCVTDQ2PH 5-31 PAGE 1855 LINE 103062 +:VCVTDQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... 
& YmmReg2_m256_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtdq2ph_avx512fp16( YmmReg2_m256_m32bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VCVTDQ2PH 5-31 PAGE 1855 LINE 103066 +:VCVTDQ2PH YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x5B; (YmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vcvtdq2ph_avx512fp16( ZmmReg2_m512_m32bcst ); + YmmMask = YmmReg1; + build YmmOpMask16; + ZmmReg1 = zext(YmmResult); +} + +# VCVTNE2PS2BF16 5-33 PAGE 1857 LINE 103147 +define pcodeop vcvtne2ps2bf16_avx512vl ; +:VCVTNE2PS2BF16 XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtne2ps2bf16_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VCVTNE2PS2BF16 5-33 PAGE 1857 LINE 103150 +:VCVTNE2PS2BF16 YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x72; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vcvtne2ps2bf16_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); + YmmMask = YmmReg1; + build YmmOpMask16; + ZmmReg1 = zext(YmmResult); +} + +# VCVTNE2PS2BF16 5-33 PAGE 1857 LINE 103153 +define pcodeop vcvtne2ps2bf16_avx512f ; +:VCVTNE2PS2BF16 ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x72; ZmmReg1 ... & ZmmReg2_m512_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + ZmmResult = vcvtne2ps2bf16_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask16; + ZmmReg1 = ZmmResult; +} + +# VCVTNEPS2BF16 5-35 PAGE 1859 LINE 103231 +define pcodeop vcvtneps2bf16_avx512vl ; +:VCVTNEPS2BF16 XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtneps2bf16_avx512vl( XmmReg2_m128_m32bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult[0,64]); +} + +# VCVTNEPS2BF16 5-35 PAGE 1859 LINE 103234 +:VCVTNEPS2BF16 XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x72; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtneps2bf16_avx512vl( YmmReg2_m256_m32bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VCVTNEPS2BF16 5-35 PAGE 1859 LINE 103237 +define pcodeop vcvtneps2bf16_avx512f ; +:VCVTNEPS2BF16 YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x72; (YmmReg1 & ZmmReg1) ... 
& ZmmReg2_m512_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vcvtneps2bf16_avx512f( ZmmReg2_m512_m32bcst ); + YmmMask = YmmReg1; + build YmmOpMask16; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPD2PH 5-37 PAGE 1861 LINE 103327 +define pcodeop vcvtpd2ph_avx512fp16 ; +:VCVTPD2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtpd2ph_avx512fp16( XmmReg2_m128_m64bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult[0,32]); +} + +# VCVTPD2PH 5-37 PAGE 1861 LINE 103331 +:VCVTPD2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5A; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtpd2ph_avx512fp16( YmmReg2_m256_m64bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult[0,64]); +} + +# VCVTPD2PH 5-37 PAGE 1861 LINE 103335 +:VCVTPD2PH XmmReg1^XmmOpMask16, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5A; (XmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m64bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtpd2ph_avx512fp16( ZmmReg2_m512_m64bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2DQ 5-45 PAGE 1869 LINE 103774 +define pcodeop vcvtph2dq_avx512fp16 ; +:VCVTPH2DQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + XmmResult = vcvtph2dq_avx512fp16( XmmReg2_m64_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask32; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2DQ 5-45 PAGE 1869 LINE 103778 +:VCVTPH2DQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x5B; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + YmmResult = vcvtph2dq_avx512fp16( XmmReg2_m128_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask32; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2DQ 5-45 PAGE 1869 LINE 103782 +:VCVTPH2DQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x5B; ZmmReg1 ... & YmmReg2_m256_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + ZmmResult = vcvtph2dq_avx512fp16( YmmReg2_m256_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask32; + ZmmReg1 = ZmmResult; +} + +# VCVTPH2PD 5-47 PAGE 1871 LINE 103861 +define pcodeop vcvtph2pd_avx512fp16 ; +:VCVTPH2PD XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst +{ + XmmResult = vcvtph2pd_avx512fp16( XmmReg2_m32_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask64; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2PD 5-47 PAGE 1871 LINE 103864 +:VCVTPH2PD YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x5A; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m64_m16bcst +{ + YmmResult = vcvtph2pd_avx512fp16( XmmReg2_m64_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask64; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2PD 5-47 PAGE 1871 LINE 103867 +:VCVTPH2PD ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x5A; ZmmReg1 ... & XmmReg2_m128_m16bcst +{ + ZmmResult = vcvtph2pd_avx512fp16( XmmReg2_m128_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask64; + ZmmReg1 = ZmmResult; +} + +# VCVTPH2PS/VCVTPH2PSX 5-49 PAGE 1873 LINE 103953 +define pcodeop vcvtph2psx_avx512fp16 ; +:VCVTPH2PSX XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32; byte=0x13; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + XmmResult = vcvtph2psx_avx512fp16( XmmReg2_m64_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask32; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2PS/VCVTPH2PSX 5-49 PAGE 1873 LINE 103957 +:VCVTPH2PSX YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32; byte=0x13; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + YmmResult = vcvtph2psx_avx512fp16( XmmReg2_m128_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask32; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2PS/VCVTPH2PSX 5-49 PAGE 1873 LINE 103961 +:VCVTPH2PSX ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32; byte=0x13; ZmmReg1 ... & YmmReg2_m256_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + ZmmResult = vcvtph2psx_avx512fp16( YmmReg2_m256_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask32; + ZmmReg1 = ZmmResult; +} + +# VCVTPH2QQ 5-53 PAGE 1877 LINE 104149 +define pcodeop vcvtph2qq_avx512fp16 ; +:VCVTPH2QQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x7B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst +{ + XmmResult = vcvtph2qq_avx512fp16( XmmReg2_m32_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask64; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2QQ 5-53 PAGE 1877 LINE 104152 +:VCVTPH2QQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x7B; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst +{ + YmmResult = vcvtph2qq_avx512fp16( XmmReg2_m64_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask64; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2QQ 5-53 PAGE 1877 LINE 104155 +:VCVTPH2QQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x7B; ZmmReg1 ... & XmmReg2_m128_m16bcst +{ + ZmmResult = vcvtph2qq_avx512fp16( XmmReg2_m128_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask64; + ZmmReg1 = ZmmResult; +} + +# VCVTPH2UDQ 5-55 PAGE 1879 LINE 104237 +define pcodeop vcvtph2udq_avx512fp16 ; +:VCVTPH2UDQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x79; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m64_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + XmmResult = vcvtph2udq_avx512fp16( XmmReg2_m64_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask32; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2UDQ 5-55 PAGE 1879 LINE 104241 +:VCVTPH2UDQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x79; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + YmmResult = vcvtph2udq_avx512fp16( XmmReg2_m128_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask32; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2UDQ 5-55 PAGE 1879 LINE 104245 +:VCVTPH2UDQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x79; ZmmReg1 ... & YmmReg2_m256_m16bcst +[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) +{ + ZmmResult = vcvtph2udq_avx512fp16( YmmReg2_m256_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask32; + ZmmReg1 = ZmmResult; +} + +# VCVTPH2UQQ 5-57 PAGE 1881 LINE 104324 +define pcodeop vcvtph2uqq_avx512fp16 ; +:VCVTPH2UQQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x79; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst +{ + XmmResult = vcvtph2uqq_avx512fp16( XmmReg2_m32_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask64; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2UQQ 5-57 PAGE 1881 LINE 104327 +:VCVTPH2UQQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x79; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst +{ + YmmResult = vcvtph2uqq_avx512fp16( XmmReg2_m64_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask64; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2UQQ 5-57 PAGE 1881 LINE 104331 +:VCVTPH2UQQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x79; ZmmReg1 ... & XmmReg2_m128_m16bcst +{ + ZmmResult = vcvtph2uqq_avx512fp16( XmmReg2_m128_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask64; + ZmmReg1 = ZmmResult; +} + +# VCVTPH2UW 5-59 PAGE 1883 LINE 104412 +define pcodeop vcvtph2uw_avx512fp16 ; +:VCVTPH2UW XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtph2uw_avx512fp16( XmmReg2_m128_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2UW 5-59 PAGE 1883 LINE 104415 +:VCVTPH2UW YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vcvtph2uw_avx512fp16( YmmReg2_m256_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask16; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2UW 5-59 PAGE 1883 LINE 104418 +:VCVTPH2UW ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... 
& ZmmReg2_m512_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + ZmmResult = vcvtph2uw_avx512fp16( ZmmReg2_m512_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask16; + ZmmReg1 = ZmmResult; +} + +# VCVTPH2W 5-61 PAGE 1885 LINE 104499 +define pcodeop vcvtph2w_avx512fp16 ; +:VCVTPH2W XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtph2w_avx512fp16( XmmReg2_m128_m16bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPH2W 5-61 PAGE 1885 LINE 104502 +:VCVTPH2W YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vcvtph2w_avx512fp16( YmmReg2_m256_m16bcst ); + YmmMask = YmmReg1; + build YmmOpMask16; + ZmmReg1 = zext(YmmResult); +} + +# VCVTPH2W 5-61 PAGE 1885 LINE 104505 +:VCVTPH2W ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512_m16bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + ZmmResult = vcvtph2w_avx512fp16( ZmmReg2_m512_m16bcst ); + ZmmMask = ZmmReg1; + build ZmmOpMask16; + ZmmReg1 = ZmmResult; +} + + +# VCVTPS2PHX 5-67 PAGE 1891 LINE 104781 +define pcodeop vcvtps2phx_avx512fp16 ; +:VCVTPS2PHX XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x1D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtps2phx_avx512fp16( XmmReg2_m128_m32bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult[0,64]); +} + +# VCVTPS2PHX 5-67 PAGE 1891 LINE 104785 +:VCVTPS2PHX XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x1D; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + XmmResult = vcvtps2phx_avx512fp16( YmmReg2_m256_m32bcst ); + XmmMask = XmmReg1; + build XmmOpMask16; + ZmmReg1 = zext(XmmResult); +} + +# VCVTPS2PHX 5-67 PAGE 1891 LINE 104789 +:VCVTPS2PHX YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x1D; (YmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m32bcst +[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) +{ + YmmResult = vcvtps2phx_avx512fp16( ZmmReg2_m512_m32bcst ); + YmmMask = YmmReg1; + build YmmOpMask16; + ZmmReg1 = zext(YmmResult); +} + +# VCVTQQ2PH 5-78 PAGE 1902 LINE 105354 +define pcodeop vcvtqq2ph_avx512fp16 ; +:VCVTQQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... 
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtqq2ph_avx512fp16( XmmReg2_m128_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult[0,32]);
+}
+
+# VCVTQQ2PH 5-78 PAGE 1902 LINE 105358
+:VCVTQQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtqq2ph_avx512fp16( YmmReg2_m256_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult[0,64]);
+}
+
+# VCVTQQ2PH 5-78 PAGE 1902 LINE 105362
+:VCVTQQ2PH XmmReg1^XmmOpMask16, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtqq2ph_avx512fp16( ZmmReg2_m512_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTSD2SH 5-82 PAGE 1906 LINE 105553
+define pcodeop vcvtsd2sh_avx512fp16 ;
+:VCVTSD2SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
+{
+ XmmResult = vcvtsd2sh_avx512fp16( evexV5_XmmReg, XmmReg2_m64 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTSH2SD 5-85 PAGE 1909 LINE 105683
+define pcodeop vcvtsh2sd_avx512fp16 ;
+:VCVTSH2SD XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vcvtsh2sd_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,64], XmmOpMask[0,1], XmmResult[0,64], XmmMask[0,64]);
+ XmmResult[64,64] = XmmReg1[64,64]; # DEST[127:64] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTSH2SI 5-86 PAGE 1910 LINE 105738
+define pcodeop vcvtsh2si_avx512fp16 ;
+:VCVTSH2SI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m16
+{
+ Reg32 = vcvtsh2si_avx512fp16( XmmReg2_m16 );
+}
+
+# VCVTSH2SI 5-86 PAGE 1910 LINE 105740
+@ifdef IA64
+:VCVTSH2SI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x2D; Reg64 ... & XmmReg2_m16
+{
+ Reg64 = vcvtsh2si_avx512fp16( XmmReg2_m16 );
+}
+@endif
+
+# VCVTSH2SS 5-87 PAGE 1911 LINE 105796
+define pcodeop vcvtsh2ss_avx512fp16 ;
+:VCVTSH2SS XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x13; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vcvtsh2ss_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
+ XmmResult[32,96] = XmmReg1[32,96]; # DEST[127:32] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTSH2USI 5-88 PAGE 1912 LINE 105851
+define pcodeop vcvtsh2usi_avx512fp16 ;
+:VCVTSH2USI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x79; Reg32 ... & XmmReg2_m16
+{
+ Reg32 = vcvtsh2usi_avx512fp16( XmmReg2_m16 );
+}
+
+# VCVTSH2USI 5-88 PAGE 1912 LINE 105853
+@ifdef IA64
+:VCVTSH2USI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x79; Reg64 ... & XmmReg2_m16
+{
+ Reg64 = vcvtsh2usi_avx512fp16( XmmReg2_m16 );
+}
+@endif
+
+# VCVTSI2SH 5-89 PAGE 1913 LINE 105910
+define pcodeop vcvtsi2sh_avx512fp16 ;
+:VCVTSI2SH XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32
+{
+ local tmp:16 = vcvtsi2sh_avx512fp16( evexV5_XmmReg, rm32 );
+ ZmmReg1 = zext(tmp);
+}
+
+# VCVTSI2SH 5-89 PAGE 1913 LINE 105914
+@ifdef IA64
+:VCVTSI2SH XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64
+{
+ local tmp:16 = vcvtsi2sh_avx512fp16( evexV5_XmmReg, rm64 );
+ ZmmReg1 = zext(tmp);
+}
+@endif
+
+# VCVTSS2SH 5-91 PAGE 1915 LINE 105984
+define pcodeop vcvtss2sh_avx512fp16 ;
+:VCVTSS2SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x1D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
+{
+ XmmResult = vcvtss2sh_avx512fp16( evexV5_XmmReg, XmmReg2_m32 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTTPH2DQ 5-100 PAGE 1924 LINE 106453
+define pcodeop vcvttph2dq_avx512fp16 ;
+:VCVTTPH2DQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst
+[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
+{
+ XmmResult = vcvttph2dq_avx512fp16( XmmReg2_m64_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTTPH2DQ 5-100 PAGE 1924 LINE 106457
+:VCVTTPH2DQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x5B; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
+{
+ YmmResult = vcvttph2dq_avx512fp16( XmmReg2_m128_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTTPH2DQ 5-100 PAGE 1924 LINE 106461
+:VCVTTPH2DQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x5B; ZmmReg1 ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
+{
+ ZmmResult = vcvttph2dq_avx512fp16( YmmReg2_m256_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VCVTTPH2QQ 5-102 PAGE 1926 LINE 106537
+define pcodeop vcvttph2qq_avx512fp16 ;
+:VCVTTPH2QQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x7A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst
+{
+ XmmResult = vcvttph2qq_avx512fp16( XmmReg2_m32_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask64;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTTPH2QQ 5-102 PAGE 1926 LINE 106541
+:VCVTTPH2QQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x7A; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst
+{
+ YmmResult = vcvttph2qq_avx512fp16( XmmReg2_m64_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask64;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTTPH2QQ 5-102 PAGE 1926 LINE 106545
+:VCVTTPH2QQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x7A; ZmmReg1 ... & XmmReg2_m128_m16bcst
+{
+ ZmmResult = vcvttph2qq_avx512fp16( XmmReg2_m128_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask64;
+ ZmmReg1 = ZmmResult;
+}
+
+# VCVTTPH2UDQ 5-104 PAGE 1928 LINE 106622
+define pcodeop vcvttph2udq_avx512fp16 ;
+:VCVTTPH2UDQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x78; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst
+[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
+{
+ XmmResult = vcvttph2udq_avx512fp16( XmmReg2_m64_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTTPH2UDQ 5-104 PAGE 1928 LINE 106626
+:VCVTTPH2UDQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x78; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
+{
+ YmmResult = vcvttph2udq_avx512fp16( XmmReg2_m128_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTTPH2UDQ 5-104 PAGE 1928 LINE 106630
+:VCVTTPH2UDQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x78; ZmmReg1 ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
+{
+ ZmmResult = vcvttph2udq_avx512fp16( YmmReg2_m256_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VCVTTPH2UQQ 5-106 PAGE 1930 LINE 106706
+define pcodeop vcvttph2uqq_avx512fp16 ;
+:VCVTTPH2UQQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x78; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst
+{
+ XmmResult = vcvttph2uqq_avx512fp16( XmmReg2_m32_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask64;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTTPH2UQQ 5-106 PAGE 1930 LINE 106710
+:VCVTTPH2UQQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x78; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst
+{
+ YmmResult = vcvttph2uqq_avx512fp16( XmmReg2_m64_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask64;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTTPH2UQQ 5-106 PAGE 1930 LINE 106714
+:VCVTTPH2UQQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x78; ZmmReg1 ... & XmmReg2_m128_m16bcst
+{
+ ZmmResult = vcvttph2uqq_avx512fp16( XmmReg2_m128_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask64;
+ ZmmReg1 = ZmmResult;
+}
+
+# VCVTTPH2UW 5-108 PAGE 1932 LINE 106790
+define pcodeop vcvttph2uw_avx512fp16 ;
+:VCVTTPH2UW XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvttph2uw_avx512fp16( XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTTPH2UW 5-108 PAGE 1932 LINE 106794
+:VCVTTPH2UW YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vcvttph2uw_avx512fp16( YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTTPH2UW 5-108 PAGE 1932 LINE 106798
+:VCVTTPH2UW ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7C; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vcvttph2uw_avx512fp16( ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VCVTTPH2W 5-110 PAGE 1934 LINE 106874
+define pcodeop vcvttph2w_avx512fp16 ;
+:VCVTTPH2W XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvttph2w_avx512fp16( XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTTPH2W 5-110 PAGE 1934 LINE 106878
+:VCVTTPH2W YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vcvttph2w_avx512fp16( YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTTPH2W 5-110 PAGE 1934 LINE 106882
+:VCVTTPH2W ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7C; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vcvttph2w_avx512fp16( ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VCVTTSH2SI 5-119 PAGE 1943 LINE 107355
+define pcodeop vcvttsh2si_avx512fp16 ;
+:VCVTTSH2SI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m16
+{
+ Reg32 = vcvttsh2si_avx512fp16( XmmReg2_m16 );
+}
+
+# VCVTTSH2SI 5-119 PAGE 1943 LINE 107358
+@ifdef IA64
+:VCVTTSH2SI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m16
+{
+ Reg64 = vcvttsh2si_avx512fp16( XmmReg2_m16 );
+}
+@endif
+
+# VCVTTSH2USI 5-120 PAGE 1944 LINE 107409
+define pcodeop vcvttsh2usi_avx512fp16 ;
+:VCVTTSH2USI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x78; Reg32 ... & XmmReg2_m16
+{
+ Reg32 = vcvttsh2usi_avx512fp16( XmmReg2_m16 );
+}
+
+# VCVTTSH2USI 5-120 PAGE 1944 LINE 107412
+@ifdef IA64
+:VCVTTSH2USI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x78; Reg64 ... & XmmReg2_m16
+{
+ Reg64 = vcvttsh2usi_avx512fp16( XmmReg2_m16 );
+}
+@endif
+
+# VCVTUDQ2PH 5-124 PAGE 1948 LINE 107633
+define pcodeop vcvtudq2ph_avx512fp16 ;
+:VCVTUDQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtudq2ph_avx512fp16( XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult[0,64]);
+}
+
+# VCVTUDQ2PH 5-124 PAGE 1948 LINE 107637
+:VCVTUDQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtudq2ph_avx512fp16( YmmReg2_m256_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTUDQ2PH 5-124 PAGE 1948 LINE 107641
+:VCVTUDQ2PH YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7A; (YmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vcvtudq2ph_avx512fp16( ZmmReg2_m512_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTUQQ2PH 5-130 PAGE 1954 LINE 107951
+define pcodeop vcvtuqq2ph_avx512fp16 ;
+:VCVTUQQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtuqq2ph_avx512fp16( XmmReg2_m128_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult[0,32]);
+}
+
+# VCVTUQQ2PH 5-130 PAGE 1954 LINE 107955
+:VCVTUQQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtuqq2ph_avx512fp16( YmmReg2_m256_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult[0,64]);
+}
+
+# VCVTUQQ2PH 5-130 PAGE 1954 LINE 107959
+:VCVTUQQ2PH XmmReg1^XmmOpMask16, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtuqq2ph_avx512fp16( ZmmReg2_m512_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTUSI2SH 5-132 PAGE 1956 LINE 108039
+define pcodeop vcvtusi2sh_avx512fp16 ;
+:VCVTUSI2SH XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32
+{
+ local tmp:16 = vcvtusi2sh_avx512fp16( evexV5_XmmReg, rm32 );
+ ZmmReg1 = zext(tmp);
+}
+
+# VCVTUSI2SH 5-132 PAGE 1956 LINE 108043
+@ifdef IA64
+:VCVTUSI2SH XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64
+{
+ local tmp:16 = vcvtusi2sh_avx512fp16( evexV5_XmmReg, rm64 );
+ ZmmReg1 = zext(tmp);
+}
+@endif
+
+# VCVTUW2PH 5-140 PAGE 1964 LINE 108377
+define pcodeop vcvtuw2ph_avx512fp16 ;
+:VCVTUW2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtuw2ph_avx512fp16( XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTUW2PH 5-140 PAGE 1964 LINE 108380
+:VCVTUW2PH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vcvtuw2ph_avx512fp16( YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTUW2PH 5-140 PAGE 1964 LINE 108383
+:VCVTUW2PH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vcvtuw2ph_avx512fp16( ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VCVTW2PH 5-142 PAGE 1966 LINE 108464
+define pcodeop vcvtw2ph_avx512fp16 ;
+:VCVTW2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vcvtw2ph_avx512fp16( XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VCVTW2PH 5-142 PAGE 1966 LINE 108467
+:VCVTW2PH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vcvtw2ph_avx512fp16( YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VCVTW2PH 5-142 PAGE 1966 LINE 108470
+:VCVTW2PH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vcvtw2ph_avx512fp16( ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VDIVPH 5-147 PAGE 1971 LINE 108747
+define pcodeop vdivph_avx512fp16 ;
+:VDIVPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vdivph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VDIVPH 5-147 PAGE 1971 LINE 108750
+:VDIVPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vdivph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VDIVPH 5-147 PAGE 1971 LINE 108753
+:VDIVPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5E; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vdivph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VDIVSH 5-149 PAGE 1973 LINE 108842
+define pcodeop vdivsh_avx512fp16 ;
+:VDIVSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vdivsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VDPBF16PS 5-150 PAGE 1974 LINE 108901
+define pcodeop vdpbf16ps_avx512vl ;
+:VDPBF16PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x52; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vdpbf16ps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VDPBF16PS 5-150 PAGE 1974 LINE 108905
+:VDPBF16PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x52; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vdpbf16ps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VDPBF16PS 5-150 PAGE 1974 LINE 108909
+define pcodeop vdpbf16ps_avx512f ;
+:VDPBF16PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x52; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vdpbf16ps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109930
+define pcodeop vfcmaddcph_avx512fp16 ;
+:VFCMADDCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfcmaddcph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109934
+:VFCMADDCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfcmaddcph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109938
+:VFCMADDCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x56; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfcmaddcph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109942
+define pcodeop vfmaddcph_avx512fp16 ;
+:VFMADDCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmaddcph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109946
+:VFMADDCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmaddcph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109950
+:VFMADDCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x56; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmaddcph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFCMADDCSH/VFMADDCSH 5-173 PAGE 1997 LINE 110095
+define pcodeop vfcmaddcsh_avx512fp16 ;
+:VFCMADDCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
+{
+ XmmResult = vfcmaddcsh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
+ XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFCMADDCSH/VFMADDCSH 5-173 PAGE 1997 LINE 110100
+define pcodeop vfmaddcsh_avx512fp16 ;
+:VFMADDCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
+{
+ XmmResult = vfmaddcsh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
+ XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110198
+define pcodeop vfcmulcph_avx512fp16 ;
+:VFCMULCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0xD6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfcmulcph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110202
+:VFCMULCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0xD6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfcmulcph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110206
+:VFCMULCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0xD6; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfcmulcph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110210
+define pcodeop vfmulcph_avx512fp16 ;
+:VFMULCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0xD6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmulcph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110213
+:VFMULCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0xD6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmulcph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110216
+:VFMULCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0xD6; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmulcph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFCMULCSH/VFMULCSH 5-178 PAGE 2002 LINE 110374
+define pcodeop vfcmulcsh_avx512fp16 ;
+:VFCMULCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xD7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
+{
+ XmmResult = vfcmulcsh_avx512fp16( evexV5_XmmReg, XmmReg2_m32 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
+ XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFCMULCSH/VFMULCSH 5-178 PAGE 2002 LINE 110379
+define pcodeop vfmulcsh_avx512fp16 ;
+:VFMULCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xD7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
+{
+ XmmResult = vfmulcsh_avx512fp16( evexV5_XmmReg, XmmReg2_m32 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
+ XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111596
+define pcodeop vfmadd132ph_avx512fp16 ;
+:VFMADD132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmadd132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111599
+:VFMADD132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmadd132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111602
+:VFMADD132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x98; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmadd132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111605
+define pcodeop vfmadd213ph_avx512fp16 ;
+:VFMADD213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmadd213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111608
+:VFMADD213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmadd213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111611
+:VFMADD213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xA8; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmadd213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111614
+define pcodeop vfmadd231ph_avx512fp16 ;
+:VFMADD231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmadd231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111617
+:VFMADD231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmadd231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111620
+:VFMADD231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xB8; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmadd231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111623
+define pcodeop vfnmadd132ph_avx512fp16 ;
+:VFNMADD132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfnmadd132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111627
+:VFNMADD132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfnmadd132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111631
+:VFNMADD132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x9C; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfnmadd132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111635
+define pcodeop vfnmadd213ph_avx512fp16 ;
+:VFNMADD213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfnmadd213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111639
+:VFNMADD213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfnmadd213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111655
+:VFNMADD213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xAC; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfnmadd213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111659
+define pcodeop vfnmadd231ph_avx512fp16 ;
+:VFNMADD231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfnmadd231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111663
+:VFNMADD231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfnmadd231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111667
+:VFNMADD231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xBC; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfnmadd231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112545
+define pcodeop vfmadd132sh_avx512fp16 ;
+:VFMADD132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfmadd132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112548
+define pcodeop vfmadd213sh_avx512fp16 ;
+:VFMADD213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfmadd213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112551
+define pcodeop vfmadd231sh_avx512fp16 ;
+:VFMADD231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfmadd231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112554
+define pcodeop vfnmadd132sh_avx512fp16 ;
+:VFNMADD132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfnmadd132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112557
+define pcodeop vfnmadd213sh_avx512fp16 ;
+:VFNMADD213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfnmadd213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112560
+define pcodeop vfnmadd231sh_avx512fp16 ;
+:VFNMADD231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfnmadd231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113276
+define pcodeop vfmaddsub132ph_avx512fp16 ;
+:VFMADDSUB132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmaddsub132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113280
+:VFMADDSUB132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmaddsub132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113284
+:VFMADDSUB132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x96; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmaddsub132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113288
+define pcodeop vfmaddsub213ph_avx512fp16 ;
+:VFMADDSUB213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmaddsub213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113292
+:VFMADDSUB213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmaddsub213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113296
+:VFMADDSUB213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xA6; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmaddsub213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113300
+define pcodeop vfmaddsub231ph_avx512fp16 ;
+:VFMADDSUB231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmaddsub231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113304
+:VFMADDSUB231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmaddsub231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113308
+:VFMADDSUB231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xB6; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmaddsub231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114283
+define pcodeop vfmsub132ph_avx512fp16 ;
+:VFMSUB132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmsub132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114286
+:VFMSUB132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmsub132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114289
+:VFMSUB132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x9A; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmsub132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114292
+define pcodeop vfmsub213ph_avx512fp16 ;
+:VFMSUB213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmsub213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114295
+:VFMSUB213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmsub213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114298
+:VFMSUB213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xAA; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmsub213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114301
+define pcodeop vfmsub231ph_avx512fp16 ;
+:VFMSUB231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmsub231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114304
+:VFMSUB231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmsub231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114307
+:VFMSUB231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xBA; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmsub231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114310
+define pcodeop vfnmsub132ph_avx512fp16 ;
+:VFNMSUB132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfnmsub132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114314
+:VFNMSUB132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfnmsub132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114318
+:VFNMSUB132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x9E; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfnmsub132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114322
+define pcodeop vfnmsub213ph_avx512fp16 ;
+:VFNMSUB213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfnmsub213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114326
+:VFNMSUB213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfnmsub213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114342
+:VFNMSUB213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xAE; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfnmsub213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114346
+define pcodeop vfnmsub231ph_avx512fp16 ;
+:VFNMSUB231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114346
+define pcodeop vfnmsub231ph_avx512fp16 ;
+:VFNMSUB231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfnmsub231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114350
+:VFNMSUB231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfnmsub231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114354
+:VFNMSUB231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xBE; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfnmsub231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115111
+define pcodeop vfmsub132sh_avx512fp16 ;
+:VFMSUB132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfmsub132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115114
+define pcodeop vfmsub213sh_avx512fp16 ;
+:VFMSUB213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfmsub213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115117
+define pcodeop vfmsub231sh_avx512fp16 ;
+:VFMSUB231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfmsub231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
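+# Note: the scalar *SH forms in this group only merge the low word: k-mask bit 0
+# selects between the computed half-precision result and the original
+# destination word, bits 127:16 of the destination are preserved, and the
+# 128-bit result is then zero-extended through bit 511 as usual for EVEX writes.
+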
+# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115120
+define pcodeop vfnmsub132sh_avx512fp16 ;
+:VFNMSUB132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfnmsub132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115124
+define pcodeop vfnmsub213sh_avx512fp16 ;
+:VFNMSUB213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfnmsub213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115128
+define pcodeop vfnmsub231sh_avx512fp16 ;
+:VFNMSUB231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vfnmsub231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115851
+define pcodeop vfmsubadd132ph_avx512fp16 ;
+:VFMSUBADD132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmsubadd132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115855
+:VFMSUBADD132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmsubadd132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115859
+:VFMSUBADD132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x97; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmsubadd132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
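+# Note: the VFMSUBADD*PH forms alternate per lane; per the SDM pseudocode the
+# even-indexed halves are fused multiply-add and the odd-indexed halves are
+# fused multiply-subtract, a distinction the opaque pcodeop does not expose.
+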
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115863
+define pcodeop vfmsubadd213ph_avx512fp16 ;
+:VFMSUBADD213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmsubadd213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115867
+:VFMSUBADD213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmsubadd213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115871
+:VFMSUBADD213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xA7; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmsubadd213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115875
+define pcodeop vfmsubadd231ph_avx512fp16 ;
+:VFMSUBADD231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vfmsubadd231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115879
+:VFMSUBADD231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vfmsubadd231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115883
+:VFMSUBADD231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xB7; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vfmsubadd231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+ClassNeg: "Negative" is imm8_6=1 & imm8_7=0 {}
+ClassNeg: "Negative,SNAN" is imm8_6=1 & imm8_7=1 {}
+ClassNeg: "SNAN" is imm8_6=0 & imm8_7=1 {}
+
+ClassDenorm: "Denormal" is imm8_5=1 & imm8_6_7=0 {}
+ClassDenorm: "Denormal,"^ClassNeg is imm8_5=1 & ClassNeg {}
+ClassDenorm: ClassNeg is imm8_5=0 & ClassNeg {}
+
+ClassNegI: "NegINF" is imm8_4=1 & imm8_5_7=0 {}
+ClassNegI: "NegINF,"^ClassDenorm is imm8_4=1 & ClassDenorm {}
+ClassNegI: ClassDenorm is imm8_4=0 & ClassDenorm {}
+
+ClassPosI: "PosINF" is imm8_3=1 & imm8_4_7=0 {}
+ClassPosI: "PosINF,"^ClassNegI is imm8_3=1 & ClassNegI {}
+ClassPosI: ClassNegI is imm8_3=0 & ClassNegI {}
+
+ClassNegZ: "NegZero" is imm8_2=1 & imm8_3_7=0 {}
+ClassNegZ: "NegZero,"^ClassPosI is imm8_2=1 & ClassPosI {}
+ClassNegZ: ClassPosI is imm8_2=0 & ClassPosI {}
+
+ClassPosZ: "PosZero" is imm8_1=1 & imm8_2_7=0 {}
+ClassPosZ: "PosZero,"^ClassNegZ is imm8_1=1 & ClassNegZ {}
+ClassPosZ: ClassNegZ is imm8_1=0 & ClassNegZ {}
+
+ClassQNaN: "QNAN" is imm8_0=1 & imm8_1_7=0 {}
+ClassQNaN: "QNAN,"^ClassPosZ is imm8_0=1 & ClassPosZ {}
+ClassQNaN: ClassPosZ is imm8_0=0 & ClassPosZ {}
+ClassQNaN: "" is imm8=0 {}
+
+ClassOp: "{"^ClassQNaN^"}" is ClassQNaN & imm8 { export *[const]:1 imm8; }
+
+# VFPCLASSPH 5-332 PAGE 2156 LINE 118786
+define pcodeop vfpclassph_avx512fp16 ;
+:VFPCLASSPH KReg_reg^XmmOpMask, XmmReg2_m128_m16bcst, ClassOp is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x66; KReg_reg ... & XmmReg2_m128_m16bcst; ClassOp
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ local tmp:8 = vfpclassph_avx512fp16( XmmReg2_m128_m16bcst, XmmOpMask, ClassOp );
+ KReg_reg = tmp;
+}
+
+# VFPCLASSPH 5-332 PAGE 2156 LINE 118792
+:VFPCLASSPH KReg_reg^XmmOpMask, YmmReg2_m256_m16bcst, ClassOp is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x66; KReg_reg ... & YmmReg2_m256_m16bcst; ClassOp
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ local tmp:8 = vfpclassph_avx512fp16( YmmReg2_m256_m16bcst, XmmOpMask, ClassOp );
+ KReg_reg = tmp;
+}
+
+# VFPCLASSPH 5-332 PAGE 2156 LINE 118798
+:VFPCLASSPH KReg_reg^XmmOpMask, ZmmReg2_m512_m16bcst, ClassOp is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x66; KReg_reg ... & ZmmReg2_m512_m16bcst; ClassOp
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ local tmp:8 = vfpclassph_avx512fp16( ZmmReg2_m512_m16bcst, XmmOpMask, ClassOp );
+ KReg_reg = tmp;
+}
+
+# VFPCLASSSH 5-339 PAGE 2163 LINE 119114
+define pcodeop vfpclasssh_avx512fp16 ;
+:VFPCLASSSH KReg_reg^XmmOpMask, XmmReg2_m16, ClassOp is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x67; KReg_reg ... & XmmReg2_m16; ClassOp
+{
+ local tmp:8 = vfpclasssh_avx512fp16( XmmReg2_m16, XmmOpMask, ClassOp );
+ KReg_reg = tmp;
+}
+
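+# Note: ClassOp prints the set of FP classes selected by imm8, one name per set
+# bit: bit 0 QNaN, bit 1 +0, bit 2 -0, bit 3 +INF, bit 4 -INF, bit 5 denormal,
+# bit 6 negative finite, bit 7 SNaN. The chained sub-tables above just
+# concatenate the names of the set bits; the operand still exports the raw
+# imm8 byte, which VFPCLASS* uses to set a result-mask bit per element that
+# matches any selected class.
+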
+# VGETEXPPH 5-359 PAGE 2183 LINE 120137
+define pcodeop vgetexpph_avx512fp16 ;
+:VGETEXPPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16; byte=0x42; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vgetexpph_avx512fp16( XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VGETEXPPH 5-359 PAGE 2183 LINE 120141
+:VGETEXPPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16; byte=0x42; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vgetexpph_avx512fp16( YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VGETEXPPH 5-359 PAGE 2183 LINE 120145
+:VGETEXPPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16; byte=0x42; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vgetexpph_avx512fp16( ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VGETEXPSH 5-368 PAGE 2192 LINE 120571
+define pcodeop vgetexpsh_avx512fp16 ;
+:VGETEXPSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vgetexpsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VGETMANTPH 5-376 PAGE 2200 LINE 120939
+define pcodeop vgetmantph_avx512fp16 ;
+:VGETMANTPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask16; byte=0x26; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vgetmantph_avx512fp16( XmmReg2_m128_m16bcst, imm8:1 );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VGETMANTPH 5-376 PAGE 2200 LINE 120943
+:VGETMANTPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask16; byte=0x26; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vgetmantph_avx512fp16( YmmReg2_m256_m16bcst, imm8:1 );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VGETMANTPH 5-376 PAGE 2200 LINE 120947
+:VGETMANTPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask16; byte=0x26; ZmmReg1 ... & ZmmReg2_m512_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vgetmantph_avx512fp16( ZmmReg2_m512_m16bcst, imm8:1 );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
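+# Note: VGETEXPPH extracts the unbiased exponent of each half as an FP16 value
+# and VGETMANTPH extracts the normalized mantissa; per the SDM, imm8[1:0]
+# selects the normalization interval and imm8[3:2] the sign control, which is
+# why the immediate is passed straight through to the opaque pcodeop.
+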
+# VGETMANTSH 5-385 PAGE 2209 LINE 121406
+define pcodeop vgetmantsh_avx512fp16 ;
+:VGETMANTSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16, imm8 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16; imm8
+{
+ XmmResult = vgetmantsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, imm8:1 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VMAXPH 5-400 PAGE 2224 LINE 122191
+define pcodeop vmaxph_avx512fp16 ;
+:VMAXPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vmaxph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VMAXPH 5-400 PAGE 2224 LINE 122194
+:VMAXPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vmaxph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VMAXPH 5-400 PAGE 2224 LINE 122197
+:VMAXPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5F; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vmaxph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VMAXSH 5-402 PAGE 2226 LINE 122291
+define pcodeop vmaxsh_avx512fp16 ;
+:VMAXSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vmaxsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VMINPH 5-404 PAGE 2228 LINE 122372
+define pcodeop vminph_avx512fp16 ;
+:VMINPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vminph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
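+# Note: the MAXPH/MINPH and MAXSH/MINSH pcodeops are opaque; architecturally
+# these follow the legacy MAX*/MIN* rule that when both operands are zero or
+# either is a NaN, the second (r/m) source is returned unmodified.
+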
+# VMINPH 5-404 PAGE 2228 LINE 122375
+:VMINPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vminph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VMINPH 5-404 PAGE 2228 LINE 122378
+:VMINPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5D; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vminph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VMINSH 5-406 PAGE 2230 LINE 122472
+define pcodeop vminsh_avx512fp16 ;
+:VMINSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vminsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VMOVSH 5-408 PAGE 2232 LINE 122557
+define pcodeop vmovsh_avx512fp16 ;
+:VMOVSH XmmReg1^XmmOpMask, m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask; byte=0x10; (XmmReg1 & ZmmReg1) ... & m16
+{
+ local tmp = m16;
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], tmp, XmmMask[0,16]);
+ XmmResult[16,112] = 0;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VMOVSH 5-408 PAGE 2232 LINE 122559
+:VMOVSH m16^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask; byte=0x11; XmmReg1 ... & m16
+{
+ local tmp:2 = XmmReg1(0);
+ local mask = m16;
+ build XmmOpMask;
+ conditionalAssign(tmp, XmmOpMask[0,1], tmp, mask);
+ m16 = tmp;
+}
+
+# VMOVSH 5-408 PAGE 2232 LINE 122561
+# WARNING: duplicate opcode EVEX.LLIG.F3.MAP5.W0 10 /r last seen on 5-408 PAGE 2232 LINE 122557 for "VMOVSH xmm1{k1}{z}, xmm2, xmm3"
+:VMOVSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2)
+{
+ local tmp = XmmReg2(0);
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], tmp, XmmMask[0,16]);
+ XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VMOVSH 5-408 PAGE 2232 LINE 122564
+# WARNING: duplicate opcode EVEX.LLIG.F3.MAP5.W0 11 /r last seen on 5-408 PAGE 2232 LINE 122559 for "VMOVSH xmm1{k1}{z}, xmm2, xmm3"
+:VMOVSH XmmReg2^XmmOpMask, evexV5_XmmReg, XmmReg1 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
+{
+ XmmResult = XmmReg1;
+ XmmMask = XmmReg2;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+ ZmmReg2 = zext(XmmResult);
+}
+
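+# Note: opcodes 10 /r and 11 /r each decode twice above; the mod=0x3 constraint
+# restricts the three-operand register forms to register-only ModRM encodings,
+# so they do not actually collide with the memory load/store forms.
+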
+# VMOVW 5-410 PAGE 2234 LINE 122642
+define pcodeop vmovw_avx512fp16 ;
+:VMOVW XmmReg1, rm16 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_WIG); byte=0x6E; (XmmReg1 & ZmmReg1) ... & rm16
+{
+ local tmp:2 = rm16;
+ ZmmReg1 = zext(tmp);
+}
+
+# VMOVW 5-410 PAGE 2234 LINE 122644
+:VMOVW rm16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_WIG); byte=0x7E; XmmReg1 ... & rm16
+{
+ rm16 = XmmReg1(0);
+}
+
+# VMULPH 5-411 PAGE 2235 LINE 122691
+define pcodeop vmulph_avx512fp16 ;
+:VMULPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vmulph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VMULPH 5-411 PAGE 2235 LINE 122694
+:VMULPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vmulph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VMULPH 5-411 PAGE 2235 LINE 122697
+:VMULPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x59; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vmulph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VMULSH 5-413 PAGE 2237 LINE 122785
+define pcodeop vmulsh_avx512fp16 ;
+:VMULSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+ XmmResult = vmulsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+ XmmMask = XmmReg1;
+ build XmmOpMask;
+ conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+ XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122845
+define pcodeop vp2intersectd_avx512vl ;
+:VP2INTERSECTD KReg_reg, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x68; KReg_reg ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ KReg_reg = vp2intersectd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
+}
+
+# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122849
+:VP2INTERSECTD KReg_reg, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x68; KReg_reg ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ KReg_reg = vp2intersectd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
+}
+
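+# Note: architecturally the VP2INTERSECT forms in this group write a pair of
+# mask registers {k, k+1}, one intersection mask per source vector; this model
+# is a simplification that only writes the named destination mask through the
+# opaque pcodeop.
+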
+# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122853
+define pcodeop vp2intersectd_avx512f ;
+:VP2INTERSECTD KReg_reg, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x68; KReg_reg ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ KReg_reg = vp2intersectd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+}
+
+# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122857
+define pcodeop vp2intersectq_avx512vl ;
+:VP2INTERSECTQ KReg_reg, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x68; KReg_reg ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ KReg_reg = vp2intersectq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
+}
+
+# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122861
+:VP2INTERSECTQ KReg_reg, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x68; KReg_reg ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ KReg_reg = vp2intersectq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
+}
+
+# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122865
+define pcodeop vp2intersectq_avx512f ;
+:VP2INTERSECTQ KReg_reg, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x68; KReg_reg ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ KReg_reg = vp2intersectq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124606
+define pcodeop vpcompressb_avx512_vbmi2 ;
+:VPCOMPRESSB m128^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x63; XmmReg1 ... & m128
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ XmmResult = vpcompressb_avx512_vbmi2( XmmReg1, XmmOpMask );
+ m128 = XmmResult;
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124608
+# WARNING: duplicate opcode EVEX.128.66.0F38.W0 63 /r last seen on 5-449 PAGE 2273 LINE 124606 for "VPCOMPRESSB xmm1{k1}{z}, xmm2"
+:VPCOMPRESSB XmmReg2^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x63; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
+{
+ XmmResult = vpcompressb_avx512_vbmi2( XmmReg1, XmmOpMask );
+ ZmmReg2 = zext(XmmResult);
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124610
+:VPCOMPRESSB m256^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x63; YmmReg1 ... & m256
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ YmmResult = vpcompressb_avx512_vbmi2( YmmReg1, YmmOpMask );
+ m256 = YmmResult;
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124612
+# WARNING: duplicate opcode EVEX.256.66.0F38.W0 63 /r last seen on 5-449 PAGE 2273 LINE 124610 for "VPCOMPRESSB ymm1{k1}{z}, ymm2"
+:VPCOMPRESSB YmmReg2^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x63; YmmReg1 & (mod=0x3 & (YmmReg2 & ZmmReg2))
+{
+ YmmResult = vpcompressb_avx512_vbmi2( YmmReg1, YmmOpMask );
+ ZmmReg2 = zext(YmmResult);
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124614
+:VPCOMPRESSB m512^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x63; ZmmReg1 ... & m512
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ ZmmResult = vpcompressb_avx512_vbmi2( ZmmReg1, ZmmOpMask );
+ m512 = ZmmResult;
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124616
+# WARNING: duplicate opcode EVEX.512.66.0F38.W0 63 /r last seen on 5-449 PAGE 2273 LINE 124614 for "VPCOMPRESSB zmm1{k1}{z}, zmm2"
+:VPCOMPRESSB ZmmReg2^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x63; ZmmReg1 & (mod=0x3 & ZmmReg2)
+{
+ ZmmResult = vpcompressb_avx512_vbmi2( ZmmReg1, ZmmOpMask );
+ ZmmReg2 = ZmmResult;
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124618
+define pcodeop vpcompressw_avx512_vbmi2 ;
+:VPCOMPRESSW m128^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x63; XmmReg1 ... & m128
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ XmmResult = vpcompressw_avx512_vbmi2( XmmReg1, XmmOpMask );
+ m128 = XmmResult;
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124620
+# WARNING: duplicate opcode EVEX.128.66.0F38.W1 63 /r last seen on 5-449 PAGE 2273 LINE 124618 for "VPCOMPRESSW xmm1{k1}{z}, xmm2"
+:VPCOMPRESSW XmmReg2^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x63; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
+{
+ XmmResult = vpcompressw_avx512_vbmi2( XmmReg1, XmmOpMask );
+ ZmmReg2 = zext(XmmResult);
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124622
+:VPCOMPRESSW m256^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x63; YmmReg1 ... & m256
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ YmmResult = vpcompressw_avx512_vbmi2( YmmReg1, YmmOpMask );
+ m256 = YmmResult;
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124624
+# WARNING: duplicate opcode EVEX.256.66.0F38.W1 63 /r last seen on 5-449 PAGE 2273 LINE 124622 for "VPCOMPRESSW ymm1{k1}{z}, ymm2"
+:VPCOMPRESSW YmmReg2^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x63; YmmReg1 & (mod=0x3 & (YmmReg2 & ZmmReg2))
+{
+ YmmResult = vpcompressw_avx512_vbmi2( YmmReg1, YmmOpMask );
+ ZmmReg2 = zext(YmmResult);
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124626
+:VPCOMPRESSW m512^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x63; ZmmReg1 ... & m512
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ ZmmResult = vpcompressw_avx512_vbmi2( ZmmReg1, ZmmOpMask );
+ m512 = ZmmResult;
+}
+
+# VPCOMPRESSB/VPCOMPRESSW 5-449 PAGE 2273 LINE 124628
+# WARNING: duplicate opcode EVEX.512.66.0F38.W1 63 /r last seen on 5-449 PAGE 2273 LINE 124626 for "VPCOMPRESSW zmm1{k1}{z}, zmm2"
+:VPCOMPRESSW ZmmReg2^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x63; ZmmReg1 & (mod=0x3 & ZmmReg2)
+{
+ ZmmResult = vpcompressw_avx512_vbmi2( ZmmReg1, ZmmOpMask );
+ ZmmReg2 = ZmmResult;
+}
+
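+# Note: compress packs the active (mask-selected) elements contiguously into
+# the low end of the destination, so the mask acts as an input to the pcodeop
+# rather than going through the usual merge/zero templates; the VPEXPAND forms
+# later in this file are modeled the same way in reverse.
+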
+# VPDPBUSD 5-459 PAGE 2283 LINE 125092
+define pcodeop vpdpbusd_avx512_vnni ;
+:VPDPBUSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpdpbusd_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPDPBUSD 5-459 PAGE 2283 LINE 125097
+:VPDPBUSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpdpbusd_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPDPBUSD 5-459 PAGE 2283 LINE 125102
+:VPDPBUSD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x50; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpdpbusd_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPDPBUSDS 5-461 PAGE 2285 LINE 125211
+define pcodeop vpdpbusds_avx512_vnni ;
+:VPDPBUSDS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpdpbusds_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPDPBUSDS 5-461 PAGE 2285 LINE 125217
+:VPDPBUSDS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x51; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpdpbusds_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPDPBUSDS 5-461 PAGE 2285 LINE 125223
+:VPDPBUSDS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x51; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpdpbusds_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPDPWSSD 5-463 PAGE 2287 LINE 125329
+define pcodeop vpdpwssd_avx512_vnni ;
+:VPDPWSSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x52; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpdpwssd_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
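+# Note: the VPDP* (AVX512-VNNI) ops are dword dot-product accumulations:
+# VPDPBUSD multiplies four unsigned bytes of src1 with the corresponding signed
+# bytes of src2 and adds the products to the dword accumulator in the
+# destination; VPDPWSSD does the same with two signed word pairs, and the
+# trailing-S variants saturate the accumulation.
+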
+# VPDPWSSD 5-463 PAGE 2287 LINE 125334
+:VPDPWSSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x52; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpdpwssd_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPDPWSSD 5-463 PAGE 2287 LINE 125339
+:VPDPWSSD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x52; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpdpwssd_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPDPWSSDS 5-465 PAGE 2289 LINE 125436
+define pcodeop vpdpwssds_avx512_vnni ;
+:VPDPWSSDS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x53; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpdpwssds_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPDPWSSDS 5-465 PAGE 2289 LINE 125442
+:VPDPWSSDS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x53; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpdpwssds_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPDPWSSDS 5-465 PAGE 2289 LINE 125448
+:VPDPWSSDS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x53; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpdpwssds_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPERMB 5-471 PAGE 2295 LINE 125727
+define pcodeop vpermb_avx512vl ;
+:VPERMB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0x8D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ XmmResult = vpermb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
+ XmmMask = XmmReg1;
+ build XmmOpMask8;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPERMB 5-471 PAGE 2295 LINE 125730
+:VPERMB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0x8D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ YmmResult = vpermb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
+ YmmMask = YmmReg1;
+ build YmmOpMask8;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPERMB 5-471 PAGE 2295 LINE 125733
+define pcodeop vpermb_avx512_vbmi ;
+:VPERMB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x8D; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ ZmmResult = vpermb_avx512_vbmi( evexV5_ZmmReg, ZmmReg2_m512 );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask8;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPERMI2B 5-476 PAGE 2300 LINE 125958
+define pcodeop vpermi2b_avx512vl ;
+:VPERMI2B XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0x75; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ XmmResult = vpermi2b_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 );
+ XmmMask = XmmReg1;
+ build XmmOpMask8;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPERMI2B 5-476 PAGE 2300 LINE 125961
+:VPERMI2B YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0x75; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ YmmResult = vpermi2b_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 );
+ YmmMask = YmmReg1;
+ build YmmOpMask8;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPERMI2B 5-476 PAGE 2300 LINE 125964
+define pcodeop vpermi2b_avx512_vbmi ;
+:VPERMI2B ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x75; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ ZmmResult = vpermi2b_avx512_vbmi( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask8;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPERMT2B 5-503 PAGE 2327 LINE 127434
+define pcodeop vpermt2b_avx512vl ;
+:VPERMT2B XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ XmmResult = vpermt2b_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 );
+ XmmMask = XmmReg1;
+ build XmmOpMask8;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPERMT2B 5-503 PAGE 2327 LINE 127437
+:VPERMT2B YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ YmmResult = vpermt2b_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 );
+ YmmMask = YmmReg1;
+ build YmmOpMask8;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPERMT2B 5-503 PAGE 2327 LINE 127440
+define pcodeop vpermt2b_avx512_vbmi ;
+:VPERMT2B ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ ZmmResult = vpermt2b_avx512_vbmi( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask8;
+ ZmmReg1 = ZmmResult;
+}
+
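+# Note: in the VPERMI2* forms the destination supplies the indices (and is
+# overwritten with the permuted result), while in the VPERMT2* forms the
+# destination is the first data table and the EVEX.vvvv register supplies the
+# indices; both read the second table from r/m, which is why all three
+# operands are passed to the pcodeop.
+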
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127524
+define pcodeop vpermt2w_avx512vl ;
+:VPERMT2W XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ XmmResult = vpermt2w_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127527
+:VPERMT2W YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ YmmResult = vpermt2w_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127530
+define pcodeop vpermt2w_avx512bw ;
+:VPERMT2W ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ ZmmResult = vpermt2w_avx512bw( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127533
+define pcodeop vpermt2d_avx512vl ;
+:VPERMT2D XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x7E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpermt2d_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127536
+:VPERMT2D YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x7E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpermt2d_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127539
+define pcodeop vpermt2d_avx512f ;
+:VPERMT2D ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x7E; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpermt2d_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127542
+define pcodeop vpermt2q_avx512vl ;
+:VPERMT2Q XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x7E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpermt2q_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask64;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127545
+:VPERMT2Q YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x7E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpermt2q_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask64;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127548
+define pcodeop vpermt2q_avx512f ;
+:VPERMT2Q ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x7E; ZmmReg1 ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpermt2q_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask64;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127551
+define pcodeop vpermt2ps_avx512vl ;
+:VPERMT2PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x7F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpermt2ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127554
+:VPERMT2PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x7F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpermt2ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127557
+define pcodeop vpermt2ps_avx512f ;
+:VPERMT2PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x7F; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpermt2ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127806
+define pcodeop vpexpandb_avx512_vbmi2 ;
+:VPEXPANDB XmmReg1^XmmOpMask, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x62; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ XmmResult = vpexpandb_avx512_vbmi2( XmmReg2_m128, XmmOpMask );
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127810
+:VPEXPANDB YmmReg1^YmmOpMask, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x62; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ YmmResult = vpexpandb_avx512_vbmi2( YmmReg2_m256, YmmOpMask );
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127814
+:VPEXPANDB ZmmReg1^ZmmOpMask, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x62; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ ZmmResult = vpexpandb_avx512_vbmi2( ZmmReg2_m512, ZmmOpMask );
+ ZmmReg1 = ZmmResult;
+}
+
+# VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127818
+define pcodeop vpexpandw_avx512_vbmi2 ;
+:VPEXPANDW XmmReg1^XmmOpMask, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x62; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ XmmResult = vpexpandw_avx512_vbmi2( XmmReg2_m128, XmmOpMask );
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127822
+:VPEXPANDW YmmReg1^YmmOpMask, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x62; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ YmmResult = vpexpandw_avx512_vbmi2( YmmReg2_m256, YmmOpMask );
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127826
+:VPEXPANDW ZmmReg1^ZmmOpMask, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x62; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
+{
+ ZmmResult = vpexpandw_avx512_vbmi2( ZmmReg2_m512, ZmmOpMask );
+ ZmmReg1 = ZmmResult;
+}
+
+# VPMADD52HUQ 5-534 PAGE 2358 LINE 128946
+define pcodeop vpmadd52huq_avx512_ifma ;
+:VPMADD52HUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0xB5; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpmadd52huq_avx512_ifma( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask64;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPMADD52HUQ 5-534 PAGE 2358 LINE 128950
+:VPMADD52HUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0xB5; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpmadd52huq_avx512_ifma( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask64;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPMADD52HUQ 5-534 PAGE 2358 LINE 128954
+:VPMADD52HUQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0xB5; ZmmReg1 ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpmadd52huq_avx512_ifma( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask64;
+ ZmmReg1 = ZmmResult;
+}
+
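+# Note: the VPMADD52 (AVX512-IFMA) ops form a 104-bit product of the low 52
+# bits of each qword in the two sources; HUQ adds bits 103:52 of that product
+# to the 64-bit accumulator in the destination, and the LUQ forms below add
+# bits 51:0.
+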
+# VPMADD52LUQ 5-536 PAGE 2360 LINE 129044
+define pcodeop vpmadd52luq_avx512_ifma ;
+:VPMADD52LUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0xB4; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpmadd52luq_avx512_ifma( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask64;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPMADD52LUQ 5-536 PAGE 2360 LINE 129048
+:VPMADD52LUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0xB4; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpmadd52luq_avx512_ifma( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask64;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPMADD52LUQ 5-536 PAGE 2360 LINE 129052
+:VPMADD52LUQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0xB4; ZmmReg1 ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpmadd52luq_avx512_ifma( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask64;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPMULTISHIFTQB 5-571 PAGE 2395 LINE 130845
+define pcodeop vpmultishiftqb_avx512_vbmi ;
+:VPMULTISHIFTQB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask8 & evexV5_XmmReg; byte=0x83; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpmultishiftqb_avx512_vbmi( evexV5_XmmReg, XmmReg2_m128_m64bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask8;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPMULTISHIFTQB 5-571 PAGE 2395 LINE 130849
+:VPMULTISHIFTQB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask8 & evexV5_YmmReg; byte=0x83; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpmultishiftqb_avx512_vbmi( evexV5_YmmReg, YmmReg2_m256_m64bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask8;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPMULTISHIFTQB 5-571 PAGE 2395 LINE 130853
+:VPMULTISHIFTQB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x83; ZmmReg1 ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpmultishiftqb_avx512_vbmi( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask8;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130938
+define pcodeop vpopcntb_avx512_bitalg ;
+:VPOPCNTB XmmReg1^XmmOpMask8, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x54; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ XmmResult = vpopcntb_avx512_bitalg( XmmReg2_m128 );
+ XmmMask = XmmReg1;
+ build XmmOpMask8;
+ ZmmReg1 = zext(XmmResult);
+}
+
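+# Note on VPMULTISHIFTQB above: each destination byte is an 8-bit field
+# extracted from the corresponding source qword (r/m) at the bit offset given
+# by the matching control byte in the EVEX.vvvv operand, wrapping modulo 64.
+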
+# VPOPCNT 5-573 PAGE 2397 LINE 130941
+:VPOPCNTB YmmReg1^YmmOpMask8, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x54; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ YmmResult = vpopcntb_avx512_bitalg( YmmReg2_m256 );
+ YmmMask = YmmReg1;
+ build YmmOpMask8;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130944
+:VPOPCNTB ZmmReg1^ZmmOpMask8, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8; byte=0x54; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ ZmmResult = vpopcntb_avx512_bitalg( ZmmReg2_m512 );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask8;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130947
+define pcodeop vpopcntw_avx512_bitalg ;
+:VPOPCNTW XmmReg1^XmmOpMask16, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16; byte=0x54; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ XmmResult = vpopcntw_avx512_bitalg( XmmReg2_m128 );
+ XmmMask = XmmReg1;
+ build XmmOpMask16;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130950
+:VPOPCNTW YmmReg1^YmmOpMask16, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16; byte=0x54; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ YmmResult = vpopcntw_avx512_bitalg( YmmReg2_m256 );
+ YmmMask = YmmReg1;
+ build YmmOpMask16;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130953
+:VPOPCNTW ZmmReg1^ZmmOpMask16, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16; byte=0x54; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+ ZmmResult = vpopcntw_avx512_bitalg( ZmmReg2_m512 );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask16;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130956
+define pcodeop vpopcntd_avx512_vpopcntdq ;
+:VPOPCNTD XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x55; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ XmmResult = vpopcntd_avx512_vpopcntdq( XmmReg2_m128_m32bcst );
+ XmmMask = XmmReg1;
+ build XmmOpMask32;
+ ZmmReg1 = zext(XmmResult);
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130959
+:VPOPCNTD YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x55; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ YmmResult = vpopcntd_avx512_vpopcntdq( YmmReg2_m256_m32bcst );
+ YmmMask = YmmReg1;
+ build YmmOpMask32;
+ ZmmReg1 = zext(YmmResult);
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130962
+:VPOPCNTD ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32; byte=0x55; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+ ZmmResult = vpopcntd_avx512_vpopcntdq( ZmmReg2_m512_m32bcst );
+ ZmmMask = ZmmReg1;
+ build ZmmOpMask32;
+ ZmmReg1 = ZmmResult;
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130965
+define pcodeop vpopcntq_avx512_vpopcntdq ;
+:VPOPCNTQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64; byte=0x55; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpopcntq_avx512_vpopcntdq( XmmReg2_m128_m64bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask64;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130968
+:VPOPCNTQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64; byte=0x55; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpopcntq_avx512_vpopcntdq( YmmReg2_m256_m64bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask64;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPOPCNT 5-573 PAGE 2397 LINE 130971
+:VPOPCNTQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64; byte=0x55; ZmmReg1 ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpopcntq_avx512_vpopcntdq( ZmmReg2_m512_m64bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask64;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131746
+define pcodeop vpshldw_avx512_vbmi2 ;
+:VPSHLDW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x70; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  XmmResult = vpshldw_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131749
+:VPSHLDW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x70; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  YmmResult = vpshldw_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131752
+:VPSHLDW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x70; ZmmReg1 ... & ZmmReg2_m512; imm8
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  ZmmResult = vpshldw_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131755
+define pcodeop vpshldd_avx512_vbmi2 ;
+:VPSHLDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshldd_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask32;
+  ZmmReg1 = zext(XmmResult);
+}
+
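+# NOTE (illustrative): VPSHLD concatenates DEST:SRC2 per element and shifts
+# left by imm8 modulo the element width, keeping the high half. For a 16-bit
+# lane with DEST=0x00FF, SRC2=0xF000, imm8=4: (0x00FFF000 << 4)[31:16] = 0x0FFF.
+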
+# VPSHLD 5-588 PAGE 2412 LINE 131758
+:VPSHLDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshldd_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask32;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131761
+:VPSHLDD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ... & ZmmReg2_m512_m32bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshldd_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask32;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131764
+define pcodeop vpshldq_avx512_vbmi2 ;
+:VPSHLDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshldq_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask64;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131767
+:VPSHLDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshldq_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask64;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHLD 5-588 PAGE 2412 LINE 131770
+:VPSHLDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ... & ZmmReg2_m512_m64bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshldq_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask64;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131888
+define pcodeop vpshldvw_avx512_vbmi2 ;
+:VPSHLDVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x70; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  XmmResult = vpshldvw_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128 );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131891
+:VPSHLDVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x70; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  YmmResult = vpshldvw_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256 );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
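+# NOTE: the VPSHLDV "variable" forms take per-element shift counts from the
+# r/m operand, so the old destination value is passed to the pcodeop as an
+# explicit first input (the destination is both a source and the result).
+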
+# VPSHLDV 5-591 PAGE 2415 LINE 131894
+:VPSHLDVW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x70; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  ZmmResult = vpshldvw_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131897
+define pcodeop vpshldvd_avx512_vbmi2 ;
+:VPSHLDVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshldvd_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask32;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131900
+:VPSHLDVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshldvd_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask32;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131903
+:VPSHLDVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshldvd_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask32;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131906
+define pcodeop vpshldvq_avx512_vbmi2 ;
+:VPSHLDVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshldvq_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask64;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131909
+:VPSHLDVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshldvq_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask64;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHLDV 5-591 PAGE 2415 LINE 131912
+:VPSHLDVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshldvq_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask64;
+  ZmmReg1 = ZmmResult;
+}
+
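+# NOTE (informative): evexD8Type/evexTType select the EVEX disp8*N
+# compressed-displacement rule. Under TupleType Full, for example, a 512-bit
+# access without broadcast scales disp8 by N=64, so an encoded disp8 of 1
+# addresses [base + 64]; with broadcast, N is the element size instead.
+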
+# VPSHRD 5-594 PAGE 2418 LINE 132044
+define pcodeop vpshrdw_avx512_vbmi2 ;
+:VPSHRDW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  XmmResult = vpshrdw_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHRD 5-594 PAGE 2418 LINE 132047
+:VPSHRDW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x72; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  YmmResult = vpshrdw_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHRD 5-594 PAGE 2418 LINE 132050
+:VPSHRDW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x72; ZmmReg1 ... & ZmmReg2_m512; imm8
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  ZmmResult = vpshrdw_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHRD 5-594 PAGE 2418 LINE 132053
+define pcodeop vpshrdd_avx512_vbmi2 ;
+:VPSHRDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshrdd_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask32;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHRD 5-594 PAGE 2418 LINE 132056
+:VPSHRDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshrdd_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask32;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHRD 5-594 PAGE 2418 LINE 132059
+:VPSHRDD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m32bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshrdd_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask32;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHRD 5-594 PAGE 2418 LINE 132062
+define pcodeop vpshrdq_avx512_vbmi2 ;
+:VPSHRDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshrdq_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask64;
+  ZmmReg1 = zext(XmmResult);
+}
+
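+# NOTE (illustrative): VPSHRD mirrors VPSHLD with a right shift of SRC2:DEST,
+# keeping the low half. For a 16-bit lane with DEST=0xF000, SRC2=0x00FF,
+# imm8=4: (0x00FFF000 >> 4)[15:0] = 0xFF00.
+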
+# VPSHRD 5-594 PAGE 2418 LINE 132065
+:VPSHRDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshrdq_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask64;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHRD 5-594 PAGE 2418 LINE 132068
+:VPSHRDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m64bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshrdq_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask64;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132183
+define pcodeop vpshrdvw_avx512_vbmi2 ;
+:VPSHRDVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  XmmResult = vpshrdvw_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128 );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132186
+:VPSHRDVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x72; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  YmmResult = vpshrdvw_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256 );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132189
+:VPSHRDVW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x72; ZmmReg1 ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  ZmmResult = vpshrdvw_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132192
+define pcodeop vpshrdvd_avx512_vbmi2 ;
+:VPSHRDVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshrdvd_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask32;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132195
+:VPSHRDVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshrdvd_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask32;
+  ZmmReg1 = zext(YmmResult);
+}
+
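+# NOTE: the OpMask16/OpMask32/OpMask64 variants differ only in writemask
+# granularity (one k bit per 16/32/64-bit element), which is why the word
+# forms build *OpMask16 while the dword/qword forms build *OpMask32/64.
+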
+# VPSHRDV 5-597 PAGE 2421 LINE 132198
+:VPSHRDVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m32bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshrdvd_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask32;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132201
+define pcodeop vpshrdvq_avx512_vbmi2 ;
+:VPSHRDVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vpshrdvq_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask64;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132204
+:VPSHRDVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vpshrdvq_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask64;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VPSHRDV 5-597 PAGE 2421 LINE 132207
+:VPSHRDVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m64bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vpshrdvq_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask64;
+  ZmmReg1 = ZmmResult;
+}
+
+# VPSHUFBITQMB 5-600 PAGE 2424 LINE 132322
+define pcodeop vpshufbitqmb_avx512_bitalg ;
+:VPSHUFBITQMB KReg_reg^XmmOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x8F; KReg_reg ... & XmmReg2_m128
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  local tmp:2 = vpshufbitqmb_avx512_bitalg( evexV5_XmmReg, XmmReg2_m128, XmmOpMask );
+  KReg_reg = zext(tmp);
+}
+
+# VPSHUFBITQMB 5-600 PAGE 2424 LINE 132325
+:VPSHUFBITQMB KReg_reg^YmmOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask & evexV5_YmmReg; byte=0x8F; KReg_reg ... & YmmReg2_m256
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  local tmp:4 = vpshufbitqmb_avx512_bitalg( evexV5_YmmReg, YmmReg2_m256, YmmOpMask );
+  KReg_reg = zext(tmp);
+}
+
+# VPSHUFBITQMB 5-600 PAGE 2424 LINE 132328
+:VPSHUFBITQMB KReg_reg^ZmmOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask & evexV5_ZmmReg; byte=0x8F; KReg_reg ... & ZmmReg2_m512
+[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
+{
+  local tmp:8 = vpshufbitqmb_avx512_bitalg( evexV5_ZmmReg, ZmmReg2_m512, ZmmOpMask );
+  KReg_reg = zext(tmp);
+}
+
+
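+# NOTE (informative): VPSHUFBITQMB produces a mask, not a vector: result bit
+# j of each qword lane is bit (SRC2.byte[j] & 63) of the matching SRC1 qword.
+# The tmp sizes above (2/4/8 bytes) match the 16/32/64 mask bits produced.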
+# VRCPPH 5-646 PAGE 2470 LINE 134707
+define pcodeop vrcpph_avx512fp16 ;
+:VRCPPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16; byte=0x4C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vrcpph_avx512fp16( XmmReg2_m128_m16bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VRCPPH 5-646 PAGE 2470 LINE 134710
+:VRCPPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16; byte=0x4C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vrcpph_avx512fp16( YmmReg2_m256_m16bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VRCPPH 5-646 PAGE 2470 LINE 134713
+:VRCPPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16; byte=0x4C; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vrcpph_avx512fp16( ZmmReg2_m512_m16bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VRCPSH 5-648 PAGE 2472 LINE 134789
+define pcodeop vrcpsh_avx512fp16 ;
+:VRCPSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+  XmmResult = vrcpsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+  XmmMask = XmmReg1;
+  build XmmOpMask;
+  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+  XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+  ZmmReg1 = zext(XmmResult);
+}
+
+
+# VREDUCEPH 5-652 PAGE 2476 LINE 134998
+define pcodeop vreduceph_avx512fp16 ;
+:VREDUCEPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask16; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vreduceph_avx512fp16( XmmReg2_m128_m16bcst, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VREDUCEPH 5-652 PAGE 2476 LINE 135003
+:VREDUCEPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask16; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vreduceph_avx512fp16( YmmReg2_m256_m16bcst, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VREDUCEPH 5-652 PAGE 2476 LINE 135008
+:VREDUCEPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask16; byte=0x56; ZmmReg1 ... & ZmmReg2_m512_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vreduceph_avx512fp16( ZmmReg2_m512_m16bcst, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
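+# NOTE: the scalar FP16 forms (VRCPSH above, VREDUCESH below) touch only
+# element 0: conditionalAssign keeps the old low word when mask bit 0 is
+# clear, bits 127:16 come from the first source register, and the zext
+# clears the rest of the ZMM destination.
+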
+# VREDUCESH 5-659 PAGE 2483 LINE 135336
+define pcodeop vreducesh_avx512fp16 ;
+:VREDUCESH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16, imm8 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16; imm8
+{
+  XmmResult = vreducesh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask;
+  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+  XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VRNDSCALEPH 5-666 PAGE 2490 LINE 135677
+define pcodeop vrndscaleph_avx512fp16 ;
+:VRNDSCALEPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask16; byte=0x08; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vrndscaleph_avx512fp16( XmmReg2_m128_m16bcst, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VRNDSCALEPH 5-666 PAGE 2490 LINE 135681
+:VRNDSCALEPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask16; byte=0x08; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vrndscaleph_avx512fp16( YmmReg2_m256_m16bcst, imm8:1 );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VRNDSCALEPH 5-666 PAGE 2490 LINE 135685
+:VRNDSCALEPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask16; byte=0x08; ZmmReg1 ... & ZmmReg2_m512_m16bcst; imm8
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vrndscaleph_avx512fp16( ZmmReg2_m512_m16bcst, imm8:1 );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VRNDSCALESH 5-674 PAGE 2498 LINE 136097
+define pcodeop vrndscalesh_avx512fp16 ;
+:VRNDSCALESH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16, imm8 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x0A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16; imm8
+{
+  XmmResult = vrndscalesh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, imm8:1 );
+  XmmMask = XmmReg1;
+  build XmmOpMask;
+  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+  XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VRSQRTPH 5-686 PAGE 2510 LINE 136692
+define pcodeop vrsqrtph_avx512fp16 ;
+:VRSQRTPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16; byte=0x4E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vrsqrtph_avx512fp16( XmmReg2_m128_m16bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VRSQRTPH 5-686 PAGE 2510 LINE 136696
+:VRSQRTPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16; byte=0x4E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vrsqrtph_avx512fp16( YmmReg2_m256_m16bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
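+# NOTE (illustrative): in the VRNDSCALE*/VREDUCE* forms above, imm8[7:4] is
+# the scale M (granularity 2^-M) and the low nibble holds rounding control;
+# e.g. imm8=0x10 rounds each half to the nearest multiple of 0.5.
+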
+# VRSQRTPH 5-686 PAGE 2510 LINE 136700
+:VRSQRTPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16; byte=0x4E; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vrsqrtph_avx512fp16( ZmmReg2_m512_m16bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VRSQRTSH 5-688 PAGE 2512 LINE 136781
+define pcodeop vrsqrtsh_avx512fp16 ;
+:VRSQRTSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+  XmmResult = vrsqrtsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+  XmmMask = XmmReg1;
+  build XmmOpMask;
+  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+  XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VSCALEFPH 5-692 PAGE 2516 LINE 136971
+define pcodeop vscalefph_avx512fp16 ;
+:VSCALEFPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vscalefph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VSCALEFPH 5-692 PAGE 2516 LINE 136974
+:VSCALEFPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vscalefph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VSCALEFPH 5-692 PAGE 2516 LINE 136977
+:VSCALEFPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x2C; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vscalefph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VSCALEFSH 5-699 PAGE 2523 LINE 137301
+define pcodeop vscalefsh_avx512fp16 ;
+:VSCALEFSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+  XmmResult = vscalefsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+  XmmMask = XmmReg1;
+  build XmmOpMask;
+  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+  XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VSQRTPH 5-712 PAGE 2536 LINE 137925
+define pcodeop vsqrtph_avx512fp16 ;
+:VSQRTPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vsqrtph_avx512fp16( XmmReg2_m128_m16bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
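+# NOTE (illustrative): the VSCALEF* forms above compute x * 2^floor(y) per
+# element (e.g. x=1.5, y=3.0 gives 12.0); they stay opaque pcodeops because
+# this language defines no 16-bit float operations to expand them with.
+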
+# VSQRTPH 5-712 PAGE 2536 LINE 137928
+:VSQRTPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x51; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vsqrtph_avx512fp16( YmmReg2_m256_m16bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VSQRTPH 5-712 PAGE 2536 LINE 137931
+:VSQRTPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x51; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vsqrtph_avx512fp16( ZmmReg2_m512_m16bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VSQRTSH 5-714 PAGE 2538 LINE 138003
+define pcodeop vsqrtsh_avx512fp16 ;
+:VSQRTSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+  XmmResult = vsqrtsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+  XmmMask = XmmReg1;
+  build XmmOpMask;
+  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+  XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VSUBPH 5-715 PAGE 2539 LINE 138057
+define pcodeop vsubph_avx512fp16 ;
+:VSUBPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  XmmResult = vsubph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
+  XmmMask = XmmReg1;
+  build XmmOpMask16;
+  ZmmReg1 = zext(XmmResult);
+}
+
+# VSUBPH 5-715 PAGE 2539 LINE 138060
+:VSUBPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  YmmResult = vsubph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
+  YmmMask = YmmReg1;
+  build YmmOpMask16;
+  ZmmReg1 = zext(YmmResult);
+}
+
+# VSUBPH 5-715 PAGE 2539 LINE 138063
+:VSUBPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5C; ZmmReg1 ... & ZmmReg2_m512_m16bcst
+[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
+{
+  ZmmResult = vsubph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
+  ZmmMask = ZmmReg1;
+  build ZmmOpMask16;
+  ZmmReg1 = ZmmResult;
+}
+
+# VSUBSH 5-717 PAGE 2541 LINE 138152
+define pcodeop vsubsh_avx512fp16 ;
+:VSUBSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+  XmmResult = vsubsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
+  XmmMask = XmmReg1;
+  build XmmOpMask;
+  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
+  XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
+  ZmmReg1 = zext(XmmResult);
+}
+
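+# NOTE: all of the FP16 entries decode through EVEX map 5 or 6, i.e.
+# $(VEX_MAP5)/$(VEX_MAP6), which the new VEX_MAP4/5/6 defines added to
+# ia.sinc below make available.
+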
+# VUCOMISH 5-721 PAGE 2545 LINE 138358
+define pcodeop vucomish_avx512fp16 ;
+:VUCOMISH XmmReg1, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0); byte=0x2E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
+{
+  local tmp:16 = vucomish_avx512fp16( XmmReg1, XmmReg2_m16 ); # both compare operands are inputs
+  ZmmReg1 = zext(tmp);
+}
+
diff --git a/Ghidra/Processors/x86/data/languages/ia.sinc b/Ghidra/Processors/x86/data/languages/ia.sinc
index 3762962541..c4071831eb 100644
--- a/Ghidra/Processors/x86/data/languages/ia.sinc
+++ b/Ghidra/Processors/x86/data/languages/ia.sinc
@@ -373,6 +373,7 @@ define register offset=2100 size=8 [ ];
# dummy registers for managing broadcast data for AVX512
+define register offset=2200 size=4 [ BCST4 ];
define register offset=2200 size=8 [ BCST8 ];
define register offset=2200 size=16 [ BCST16 ];
define register offset=2200 size=32 [ BCST32 ];
@@ -446,6 +447,7 @@ define context contextreg
vexVVVV_XmmReg=(25,28) # value of vex byte for matching XmmReg
vexVVVV_YmmReg=(25,28) # value of vex byte for matching YmmReg
vexVVVV_ZmmReg=(25,28) # value of vex byte for matching ZmmReg
+ vexHighV=(25,25)
evexVopmask=(26,28) # VEX.vvvv opmask
@@ -591,9 +593,23 @@ define token modrm (8)
xmmreg1_x = (3,5)
ymmreg1_x = (3,5)
zmmreg1_x = (3,5)
+ xmmreg1_r = (3,5)
+ ymmreg1_r = (3,5)
+ zmmreg1_r = (3,5)
+ xmmreg1_rx = (3,5)
+ ymmreg1_rx = (3,5)
+ zmmreg1_rx = (3,5)
+ xmmreg2_b = (0,2)
+ ymmreg2_b = (0,2)
+ zmmreg2_b = (0,2)
xmmreg2_x = (0,2)
ymmreg2_x = (0,2)
zmmreg2_x = (0,2)
+ xmmreg2_bx = (0,2)
+ ymmreg2_bx = (0,2)
+ zmmreg2_bx = (0,2)
+
+
vex_pp = (0,1)
vex_l = (2,2)
vex_vvvv = (3,6)
@@ -606,7 +622,7 @@
evex_rp = (4,4)
evex_res = (3,3)
evex_res2 = (2,2)
- evex_mmm = (0,2)
+ evex_mmm = (0,2)
evex_z = (7,7)
evex_lp = (6,6)
@@ -644,13 +660,25 @@ define token sib (8)
;

define token I8 (8)
- imm8_7=(7,7)
Xmm_imm8_7_4=(4,7)
Ymm_imm8_7_4=(4,7)
+ imm8_7=(7,7)
+ imm8_6=(6,6)
+ imm8_6_7=(6,7)
+ imm8_5=(5,5)
+ imm8_5_7=(5,7)
imm8_4=(4,4)
+ imm8_4_7=(4,7)
+ imm8_3=(3,3)
+ imm8_3_7=(3,7)
+ imm8_2=(2,2)
+ imm8_2_7=(2,7)
+ imm8_1=(1,1)
+ imm8_1_7=(1,7)
imm8_0=(0,0)
imm8_3_0=(0,3)
imm8=(0,7)
+ imm8_val=(0,7)
simm8=(0,7) signed
;
@@ -686,7 +714,11 @@ attach variables [ mmxreg mmxreg1 mmxreg2 ] [ MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 ];
attach variables [ xmmreg xmmreg1 xmmreg2 xmm_vsib ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 ];
-attach variables [ xmmreg_x xmmreg1_x xmmreg2_x xmm_vsib_x ] [ XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];
+attach variables [ xmmreg_x xmmreg1_x xmmreg2_b xmm_vsib_x ] [ XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];
+
+attach variables [ xmmreg1_r xmmreg2_x ] [ XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 ];
+
+attach variables [ xmmreg1_rx xmmreg2_bx ] [ XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 ];

attach variables [ vexVVVV_XmmReg Xmm_imm8_7_4 ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];
@@ -713,10 +745,14 @@ attach variables [ vexVVVV_r32 ] [ EAX ECX EDX EBX ESP EBP ESI EDI _ _
attach variables [ evexOpmask opmaskreg opmaskrm evexVopmask ] [ K0 K1 K2 K3 K4 K5 K6 K7 ];

attach variables [ ymmreg ymmreg1 ymmreg2 ymm_vsib ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 ];
-attach variables [ ymmreg_x ymmreg1_x ymmreg2_x ymm_vsib_x ] [ YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];
+attach variables [ ymmreg_x ymmreg1_x ymmreg2_b ymm_vsib_x ] [ YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];
+attach variables [ ymmreg1_r ymmreg2_x ] [ YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 ];
+attach variables [ ymmreg1_rx ymmreg2_bx ] [ YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ];

attach variables [ zmmreg zmmreg1 zmmreg2 zmm_vsib ] [ ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ];
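+# NOTE: the _r/_rx (reg field) and _b/_bx (r/m field) families split the
+# 5-bit EVEX register numbers into banks of eight; e.g. xmmreg1_r maps to
+# XMM16-XMM23 (EVEX.R'=1, REX.R=0) and xmmreg1_rx to XMM24-XMM31 (both set),
+# matching the attach lists here.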
-attach variables [ zmmreg_x zmmreg1_x zmmreg2_x zmm_vsib_x ] [ ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ];
+attach variables [ zmmreg_x zmmreg1_x zmmreg2_b zmm_vsib_x ] [ ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ];
+attach variables [ zmmreg1_r zmmreg2_x ] [ ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ];
+attach variables [ zmmreg1_rx zmmreg2_bx ] [ ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 ];

attach variables [ bnd1 bnd2 ] [ BND0 BND1 BND2 BND3 _ _ _ _ ];
attach variables [ bnd1_lb bnd2_lb ] [ BND0_LB BND1_LB BND2_LB BND3_LB _ _ _ _ ];
@@ -827,17 +863,30 @@ XmmReg: xmmreg is rexRprefix=0 & xmmreg { export
XmmReg: xmmreg_x is rexRprefix=1 & xmmreg_x { export xmmreg_x; }
XmmReg1: xmmreg1 is rexRprefix=0 & xmmreg1 { export xmmreg1; }
XmmReg1: xmmreg1_x is rexRprefix=1 & xmmreg1_x { export xmmreg1_x; }
+XmmReg1: xmmreg1_r is rexRprefix=0 & evexRp=1 & xmmreg1_r { export xmmreg1_r; }
+XmmReg1: xmmreg1_rx is rexRprefix=1 & evexRp=1 & xmmreg1_rx { export xmmreg1_rx; }
XmmReg2: xmmreg2 is rexBprefix=0 & xmmreg2 { export xmmreg2; }
-XmmReg2: xmmreg2_x is rexBprefix=1 & xmmreg2_x { export xmmreg2_x; }
+XmmReg2: xmmreg2_b is rexBprefix=1 & xmmreg2_b { export xmmreg2_b; }
+XmmReg2: xmmreg2_x is rexBprefix=0 & rexXprefix=1 & xmmreg2_x { export xmmreg2_x; }
+XmmReg2: xmmreg2_bx is rexBprefix=1 & rexXprefix=1 & xmmreg2_bx { export xmmreg2_bx; }
+
YmmReg1: ymmreg1 is rexRprefix=0 & ymmreg1 { export ymmreg1; }
YmmReg1: ymmreg1_x is rexRprefix=1 & ymmreg1_x { export ymmreg1_x; }
+YmmReg1: ymmreg1_r is rexRprefix=0 & evexRp=1 & ymmreg1_r { export ymmreg1_r; }
+YmmReg1: ymmreg1_rx is rexRprefix=1 & evexRp=1 & ymmreg1_rx { export ymmreg1_rx; }
YmmReg2: ymmreg2 is rexBprefix=0 & ymmreg2 { export ymmreg2; }
-YmmReg2: ymmreg2_x is rexBprefix=1 & ymmreg2_x { export ymmreg2_x; }
+YmmReg2: ymmreg2_b is rexBprefix=1 & ymmreg2_b { export ymmreg2_b; }
+YmmReg2: ymmreg2_x is rexBprefix=0 & rexXprefix=1 & ymmreg2_x { export ymmreg2_x; }
+YmmReg2: ymmreg2_bx is rexBprefix=1 & rexXprefix=1 & ymmreg2_bx { export ymmreg2_bx; }

ZmmReg1: zmmreg1 is rexRprefix=0 & zmmreg1 { export zmmreg1; }
ZmmReg1: zmmreg1_x is rexRprefix=1 & zmmreg1_x { export zmmreg1_x; }
+ZmmReg1: zmmreg1_r is rexRprefix=0 & evexRp=1 & zmmreg1_r { export zmmreg1_r; }
+ZmmReg1: zmmreg1_rx is rexRprefix=1 & evexRp=1 & zmmreg1_rx { export zmmreg1_rx; }
ZmmReg2: zmmreg2 is rexBprefix=0 & zmmreg2 { export zmmreg2; }
-ZmmReg2: zmmreg2_x is rexBprefix=1 & zmmreg2_x { export zmmreg2_x; }
+ZmmReg2: zmmreg2_b is rexBprefix=1 & zmmreg2_b { export zmmreg2_b; }
+ZmmReg2: zmmreg2_x is rexBprefix=0 & rexXprefix=1 & zmmreg2_x { export zmmreg2_x; }
+ZmmReg2: zmmreg2_bx is rexBprefix=1 & rexXprefix=1 & zmmreg2_bx { export zmmreg2_bx; }

Xmm_vsib: xmm_vsib is rexXprefix=0 & xmm_vsib { export xmm_vsib; }
Xmm_vsib: xmm_vsib_x is rexXprefix=1 & xmm_vsib_x { export xmm_vsib_x; }
@@ -1264,6 +1313,36 @@ ZmmReg2_m512: m512 is m512 { export m512; }
XmmReg2_m128_extend: XmmReg2 is mod=3 & XmmReg2 & ZmmReg2 { ZmmReg2 = zext(XmmReg2); }
XmmReg2_m128_extend: XmmReg2 is mod & XmmReg2 { }
+m16bcst32: m16 is m16 { local tmp:2 = m16; BCST4[0,16] = tmp; BCST4[16,16] = tmp; export BCST4; }
+
+m16bcst64: m16 is m16 { local tmp:2 = m16; BCST8[0,16] = tmp; BCST8[16,16] = tmp; BCST8[32,16] = tmp; BCST8[48,16] = tmp; export BCST8; }
+m16bcst128: m16 is m16 {
+  local tmp:2 = m16;
+  BCST16[0,16] = tmp; BCST16[16,16] = tmp; BCST16[32,16] = tmp; BCST16[48,16] = tmp;
+  BCST16[64,16] = tmp; BCST16[80,16] = tmp; BCST16[96,16] = tmp; BCST16[112,16] = tmp;
+  export BCST16;
+}
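+# NOTE (illustrative): the m16bcstN constructors replicate one 16-bit memory
+# element across a BCST scratch register; e.g. loading 0x3C00 (half-precision
+# 1.0) through m16bcst128 fills BCST16 with eight copies of 1.0.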
+m16bcst256: m16 is m16 {
+  local tmp:2 = m16;
+  BCST32[0,16] = tmp; BCST32[16,16] = tmp; BCST32[32,16] = tmp; BCST32[48,16] = tmp;
+  BCST32[64,16] = tmp; BCST32[80,16] = tmp; BCST32[96,16] = tmp; BCST32[112,16] = tmp;
+  BCST32[128,16] = tmp; BCST32[144,16] = tmp; BCST32[160,16] = tmp; BCST32[176,16] = tmp;
+  BCST32[192,16] = tmp; BCST32[208,16] = tmp; BCST32[224,16] = tmp; BCST32[240,16] = tmp;
+  export BCST32;
+}
+m16bcst512: m16 is m16 {
+  local tmp:2 = m16;
+  BCST64[0,16] = tmp; BCST64[16,16] = tmp; BCST64[32,16] = tmp; BCST64[48,16] = tmp;
+  BCST64[64,16] = tmp; BCST64[80,16] = tmp; BCST64[96,16] = tmp; BCST64[112,16] = tmp;
+  BCST64[128,16] = tmp; BCST64[144,16] = tmp; BCST64[160,16] = tmp; BCST64[176,16] = tmp;
+  BCST64[192,16] = tmp; BCST64[208,16] = tmp; BCST64[224,16] = tmp; BCST64[240,16] = tmp;
+  BCST64[256,16] = tmp; BCST64[272,16] = tmp; BCST64[288,16] = tmp; BCST64[304,16] = tmp;
+  BCST64[320,16] = tmp; BCST64[336,16] = tmp; BCST64[352,16] = tmp; BCST64[368,16] = tmp;
+  BCST64[384,16] = tmp; BCST64[400,16] = tmp; BCST64[416,16] = tmp; BCST64[432,16] = tmp;
+  BCST64[448,16] = tmp; BCST64[464,16] = tmp; BCST64[480,16] = tmp; BCST64[496,16] = tmp;
+  export BCST64;
+}
+
m32bcst64: m32 is m32 { local tmp:4 = m32; BCST8[0,32] = tmp; BCST8[32,32] = tmp; export BCST8; }
m32bcst128: m32 is m32 { local tmp:4 = m32; BCST16[0,32] = tmp; BCST16[32,32] = tmp; BCST16[64,32] = tmp; BCST16[96,32] = tmp; export BCST16; }
m32bcst256: m32 is m32 {
@@ -1290,10 +1369,22 @@ m64bcst512: m64 is m64 {
export BCST64;
}

+XmmReg2_m32_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
+XmmReg2_m32_m16bcst: m32 is m32 & evexDisp8N { local tmp:16 = zext(m32); export tmp; }
+XmmReg2_m32_m16bcst: m16bcst32 is evexB=1 & m16bcst32 & evexDisp8N { local tmp:16 = zext(m16bcst32); export tmp; }
+
+XmmReg2_m64_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
+XmmReg2_m64_m16bcst: m64 is m64 & evexDisp8N { local tmp:16 = zext(m64); export tmp; }
+XmmReg2_m64_m16bcst: m16bcst64 is evexB=1 & m16bcst64 & evexDisp8N { local tmp:16 = zext(m16bcst64); export tmp; }
+
XmmReg2_m64_m32bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m64_m32bcst: m64 is m64 & evexDisp8N { local tmp:16 = zext(m64); export tmp; }
XmmReg2_m64_m32bcst: m32bcst64 is evexB=1 & m32bcst64 & evexDisp8N { local tmp:16 = zext(m32bcst64); export tmp; }

+XmmReg2_m128_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
+XmmReg2_m128_m16bcst: m128 is m128 & evexDisp8N { export m128; }
+XmmReg2_m128_m16bcst: m16bcst128 is evexB=1 & m16bcst128 & evexDisp8N { export m16bcst128; }
+
XmmReg2_m128_m32bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m128_m32bcst: m128 is m128& evexDisp8N { export m128; }
XmmReg2_m128_m32bcst: m32bcst128 is evexB=1 & m32bcst128 & evexDisp8N { export m32bcst128; }
@@ -1302,6 +1393,10 @@ XmmReg2_m128_m64bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2
XmmReg2_m128_m64bcst: m128 is m128 & evexDisp8N { export m128; }
XmmReg2_m128_m64bcst: m64bcst128 is evexB=1 & m64bcst128 & evexDisp8N { export m64bcst128; }

+YmmReg2_m256_m16bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
+YmmReg2_m256_m16bcst: m256 is m256 & evexDisp8N { export m256; }
+YmmReg2_m256_m16bcst: m16bcst256 is evexB=1 & m16bcst256 & evexDisp8N { export m16bcst256; }
+
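+# NOTE: like the m32bcst/m64bcst families below, each *_m16bcst operand
+# resolves one of three ways: a register when mod=3, plain memory, or a
+# broadcast 16-bit element when evexB=1.
+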
YmmReg2_m256_m32bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
YmmReg2_m256_m32bcst: m256 is m256 & evexDisp8N { export m256; }
YmmReg2_m256_m32bcst: m32bcst256 is evexB=1 & m32bcst256 & evexDisp8N { export m32bcst256; }
@@ -1310,6 +1405,10 @@ YmmReg2_m256_m64bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2
YmmReg2_m256_m64bcst: m256 is m256 & evexDisp8N { export m256; }
YmmReg2_m256_m64bcst: m64bcst256 is evexB=1 & m64bcst256 & evexDisp8N { export m64bcst256; }

+ZmmReg2_m512_m16bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
+ZmmReg2_m512_m16bcst: m512 is m512 & evexDisp8N { export m512; }
+ZmmReg2_m512_m16bcst: m16bcst512 is evexB=1 & m16bcst512 & evexDisp8N { export m16bcst512; }
+
ZmmReg2_m512_m32bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
ZmmReg2_m512_m32bcst: m512 is m512 & evexDisp8N { export m512; }
ZmmReg2_m512_m32bcst: m32bcst512 is evexB=1 & m32bcst512 & evexDisp8N { export m32bcst512; }
@@ -1501,7 +1600,7 @@ unlock: is lockprefx=1 { UNLOCK(); }
unlock: is epsilon { }

KReg_reg: opmaskreg is opmaskreg { export opmaskreg; }
-KReg_rm: opmaskrm is opmaskrm { export opmaskrm; }
+KReg_rm: opmaskrm is opmaskrm { export opmaskrm; } # not used

vexVVVV_KReg: evexVopmask is evexVopmask { export evexVopmask; }
vex1VVV_KReg: evexVopmask is evexVopmask & vexHighV=1 { export evexVopmask; }
@@ -1515,7 +1614,7 @@ ZmmMaskMode: is evexZ=0 { }
ZmmMaskMode: "{z}" is evexZ=1 { ZmmMask=0; }

AVXOpMask: "{"^evexOpmask^"}" is evexOpmask { export evexOpmask; }
-
+AVXOpMask: is evexOpmask=0 { local tmp:8 = 0xffffffffffffffff; export *[const]:8 tmp; }
# Z=0: merge masking
# Z=1: zero masking
XmmOpMask: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode {
@@ -2314,7 +2413,7 @@ macro fucompe(val1, val2) {
@define VEX_L128 "vexL=0"
@define VEX_L256 "vexL=1"
@define EVEX_L512 "evexLp=1 & vexL=0"
-@define EVEX_LIG "evexLp & vexL"
+@define EVEX_LLIG "evexLp & vexL"

# These are only to be used with VEX or EVEX decoding, where only one "mandatory" prefix is encoded in the VEX or EVEX.
# If no prefix is specified, then VEX_PRE_NONE must be used.
@@ -2329,7 +2428,9 @@ macro fucompe(val1, val2) {
@define VEX_0F "vexMMMMM=1"
@define VEX_0F38 "vexMMMMM=2"
@define VEX_0F3A "vexMMMMM=3"
-#TODO: later extensions such as AVX10 and APX allow evex maps 4, 5 and 6
+@define VEX_MAP4 "vexMMMMM=4"
+@define VEX_MAP5 "vexMMMMM=5"
+@define VEX_MAP6 "vexMMMMM=6"

# Specification is "WIG", "W0", or "W1".
@define VEX_WIG "rexWprefix"
@@ -4147,7 +4248,9 @@ define pcodeop swap_bytes;
define pcodeop ptwrite;

:PTWRITE rm32 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xae; rm32 & reg_opcode=4 ... { ptwrite(rm32); }
-
+@ifdef IA64
+:PTWRITE rm64 is vexMode=0 & $(PRE_F3) & opsize=2 & byte=0x0f; byte=0xae; rm64 & reg_opcode=4 ... { ptwrite(rm64); }
+@endif
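+# NOTE: the rm64 PTWRITE form requires REX.W (opsize=2) and exists only in
+# 64-bit mode, hence the @ifdef IA64 guard. In the corrected PUSH entries
+# below, the pushXY helper names appear to encode stack-pointer size X and
+# operand size Y in bytes (an inference from usage), so addrsize=1 with
+# opsize=0 selects push42: a 2-byte value pushed via a 4-byte stack pointer.
+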
:PUSH rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push22(rm16); }
:PUSH rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push42(rm16); }
@@ -4168,8 +4271,10 @@ define pcodeop ptwrite;
:PUSH Rmr64 is $(LONGMODE_ON) & vexMode=0 & row=5 & page=0 & Rmr64 { push88(Rmr64); }
@endif

-:PUSH simm8_16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push22(tmp); }
-:PUSH simm8_32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push44(tmp); }
+:PUSH simm8_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push22(tmp); }
+:PUSH simm8_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push42(tmp); }
+:PUSH simm8_32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push24(tmp); }
+:PUSH simm8_32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push44(tmp); }
@ifdef IA64
:PUSH simm8_16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push82(tmp); }
:PUSH simm8_64 is $(LONGMODE_ON) & vexMode=0 & byte=0x6a; simm8_64 { tmp:8=simm8_64; push88(tmp); }
@@ -4181,7 +4286,7 @@ define pcodeop ptwrite;
:PUSH imm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x68; imm32 { tmp:4=imm32; push44(tmp); }
@ifdef IA64
:PUSH simm16_16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push82(tmp); }
-:PUSH simm32 is $(LONGMODE_ON) & vexMode=0 & byte=0x68; simm32 { tmp:8=simm32; push88(tmp); }
+:PUSH simm32_64 is $(LONGMODE_ON) & vexMode=0 & byte=0x68; simm32_64 { tmp:8=simm32_64; push88(tmp); }
@endif

:PUSH CS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xe & CS { push22(CS); }
@@ -4195,12 +4300,12 @@ define pcodeop ptwrite;
:PUSH FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa0 & FS { push22(FS); }
:PUSH FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa0 & FS { pushseg44(FS); }
@ifdef IA64
-:PUSH FS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa0 & FS { pushseg88(FS); }
+:PUSH FS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa0 & FS { pushseg88(FS); }
@endif
:PUSH GS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa8 & GS { push22(GS); }
:PUSH GS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa8 & GS { pushseg44(GS); }
@ifdef IA64
-:PUSH GS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa8 & GS { pushseg88(GS); }
+:PUSH GS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa8 & GS { pushseg88(GS); }
@endif

:PUSHA is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x60 { local tmp=SP; push22(AX); push22(CX); push22(DX); push22(BX); push22(tmp); push22(BP); push22(SI); push22(DI); }