x86/x86avxgen: enable AVX512 encoder tables generation

The tool now generates both VEX- and EVEX-encoded optabs.

An encoder based on these optabs passes the tests added in
https://golang.org/cl/107217.

This version uses XED datafiles directly instead of x86.csv.

Also moves the x86/x86spec/xeddata package to x86/xeddata to make it
usable from other x86 packages, and ports the x86spec pattern set
type to xeddata.

Updates golang/go#22779
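
For reference, the relocated xeddata package is consumed roughly as
follows (a minimal sketch; the path and the error handling are
hypothetical, only the xeddata calls appear in this CL):

	package main

	import (
		"log"

		"golang.org/x/arch/x86/xeddata"
	)

	func main() {
		const xedPath = "/path/to/xed/datafiles" // hypothetical location
		db, err := xeddata.NewDatabase(xedPath)
		if err != nil {
			log.Fatalf("open database: %v", err)
		}
		err = xeddata.WalkInsts(xedPath, func(inst *xeddata.Inst) {
			inst.Pattern = xeddata.ExpandStates(db, inst.Pattern)
			_ = xeddata.NewPatternSet(inst.Pattern)
		})
		if err != nil {
			log.Fatalf("walk insts: %v", err)
		}
	}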

Change-Id: I304267d888dcda4f776d1241efa524f397a8b7b3
Reviewed-on: https://go-review.googlesource.com/107216
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
diff --git a/x86/x86avxgen/avxgen.go b/x86/x86avxgen/avxgen.go
deleted file mode 100644
index bd3b40a..0000000
--- a/x86/x86avxgen/avxgen.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"strings"
-
-	"golang.org/x/arch/x86/x86csv"
-)
-
-// An encoding is the parsed x86csv.Inst Encoding.
-type encoding struct {
-	vex     string // Dot separated VEX prefix. e.g. "VEX.NDD.256.66.0F.WIG"
-	opbyte  string // Single opcode encoding byte (example: "38")
-	opdigit string // "/digit" byte that extends the opcode (example: "7" for /7)
-}
-
-// parseEncoding parses x86csv.Inst Encoding.
-func parseEncoding(encString string) encoding {
-	f := strings.Fields(encString)
-	enc := encoding{
-		vex:    f[0],
-		opbyte: f[1],
-	}
-
-	// Parse rest parts.
-	// Currently interested only in "/digit" byte,
-	// but that may change later.
-	for _, p := range f[2:] {
-		switch p {
-		case "/r", "/is4":
-			// Currently not handled.
-
-		case "/0", "/1", "/2", "/3", "/4", "/5", "/6", "/7":
-			enc.opdigit = p[len("/"):]
-		}
-	}
-
-	return enc
-}
-
-// ytabID is a name of "x86/asm6.go" ytab table object.
-//
-// ytabMap contains all IDs that can be referenced
-// from generated Optabs.
-type ytabID string
-
-// optab holds data that is required to emit x86 optab entry.
-//
-// That is, it is not "the optab" itself, but a set
-// of parameters required to expand a template.
-//
-// Terminology differences:
-// x86csv   | asm6.go
-// ------------------
-// opcode   | as
-// encoding | op
-// ------------------
-// We use asm6.go terminology only in description of this structure,
-// as it describes asm6.go object.
-type optab struct {
-	// Prefix is fixed to "Pvex" right now.
-	// This may change when EVEX-encoded instructions
-	// generation is supported.
-
-	as     string   // AXXX constant name without leading "A" (example: ADD for AADD)
-	ytabID ytabID   // ytab table name (example: yvex_y2)
-	op     []string // Encoding parts
-}
-
-// doGroups groups instructions in insts by Go name and then calls
-// f for each different name, passing the name and the instructions
-// using that name. The calls are made ordered by first appearance
-// of name in insts, and the list of instructions for a given name
-// are in the same order as in insts.
-func doGroups(insts []*x86csv.Inst, f func(string, []*x86csv.Inst)) {
-	var opcodes []string
-	groups := make(map[string][]*x86csv.Inst)
-	for _, inst := range insts {
-		op := inst.GoOpcode()
-		if groups[op] == nil {
-			opcodes = append(opcodes, op)
-		}
-		groups[op] = append(groups[op], inst)
-	}
-	for _, op := range opcodes {
-		f(op, groups[op])
-	}
-}
-
-// argsNormalizer is used to transform Intel manual style args (operands)
-// to shorter form. Compact form is used in compound keys (see ytabMap).
-//
-// asm6.go (x86 asm backend) does not care about:
-// - memory operand sizes. There are distinct instructions for different sizes.
-// - register indexes. "xmm1" or "xmm" - does not matter.
-var argsNormalizer = strings.NewReplacer(
-	", ", ",",
-	" ", "",
-
-	"imm8", "i8",
-
-	"m8", "m",
-	"m16", "m",
-	"m32", "m",
-	"m64", "m",
-	"m128", "m",
-	"m256", "m",
-
-	"r32", "r",
-	"r64", "r",
-
-	"xmm1", "x",
-	"xmm2", "x",
-	"xmm3", "x",
-	"xmm", "x",
-
-	"ymm1", "y",
-	"ymm2", "y",
-	"ymm3", "y",
-	"ymm", "y",
-)
-
-// ytabKey computes a key describing the operand forms from insts for ytabMap.
-// This lets us find instructions with the same groups of forms and
-// have them share a ytab entry.
-func ytabKey(op string, insts []*x86csv.Inst) string {
-	var all []string
-	for _, inst := range insts {
-		form := argsNormalizer.Replace(inst.Go[len(op):])
-		all = append(all, form)
-	}
-	return strings.Join(all, ";")
-}
-
-// vexExpr returns the Go expression describing the VEX prefix.
-//
-// Examples:
-//   "VEX.NDS.256.0F.WIG" => "vexNDS|vex256|vex0F|vexWIG"
-//   "VEX.256.0F.WIG"     => "vexNOVSR|vex256|vex0F|vexWIG"
-func vexExpr(vex string) string {
-	expr := strings.Replace(vex, ".", "|vex", -1)[len("VEX|"):]
-	for _, p := range [...]string{"vexNDS", "vexNDD", "vexDDS"} {
-		if strings.HasPrefix(expr, p) {
-			return expr
-		}
-	}
-	return "vexNOVSR|" + expr
-}
diff --git a/x86/x86avxgen/avxgen_test.go b/x86/x86avxgen/avxgen_test.go
index bbddf67..e2f5b24 100644
--- a/x86/x86avxgen/avxgen_test.go
+++ b/x86/x86avxgen/avxgen_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Go Authors. All rights reserved.
+// Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -6,371 +6,908 @@
 
 import (
 	"bytes"
+	"io/ioutil"
+	"path/filepath"
 	"regexp"
 	"strings"
+	"sync"
 	"testing"
 
-	"golang.org/x/arch/x86/x86csv"
+	"golang.org/x/arch/x86/xeddata"
 )
 
-func TestYtabKey(t *testing.T) {
-	type testCase struct {
-		insts []*x86csv.Inst
-		key   string
-	}
-	test := func(key string, goSyntaxes ...string) testCase {
-		insts := make([]*x86csv.Inst, len(goSyntaxes))
-		for i, stx := range goSyntaxes {
-			insts[i] = &x86csv.Inst{Go: stx}
-		}
-		return testCase{insts: insts, key: key}
-	}
-	tests := []testCase{
-		test("", "VZEROALL"),
-		test("i8,x/m,x", "VAESKEYGENASSIST imm8, xmm2/m128, xmm1"),
-		test("x/m,xV,x;y/m,yV,y", "VADDPD xmm2/m128, xmmV, xmm1", "VADDPD ymm2/m256, ymmV, ymm1"),
-		test("x/m,xV,x", "VADDSD xmm2/m64, xmmV, xmm1"),
-		test("x/m,x", "VAESIMC xmm2/m128, xmm1"),
-		test("r/m,i8;r/m,i8;r/m,i8;r/m,i8", "XOR r/m16, imm8", "XOR r/m32, imm8", "XOR r/m64, imm8", "XOR r/m8, imm8"),
-		test("xV,yV", "OP1 xmm1V, ymm2V "),
-		test("x,y", "OP2  xmm, ymm"),
-		test("x/m,r/m", "OP3 xmm3/m32,r/m32"),
-	}
-	for _, test := range tests {
-		op := test.insts[0].GoOpcode()
-		key := ytabKey(op, test.insts)
-		if key != test.key {
-			t.Errorf("ytabKey(%s, ...)\nwant: '%s'\nhave: '%s'", op, key, test.key)
-		}
-	}
-}
-
-func TestVexExpr(t *testing.T) {
-	tests := map[string]string{
-		"VEX.NDS.256.0F.WIG":    "vexNDS|vex256|vex0F|vexWIG",
-		"VEX.256.66.0F.WIG":     "vexNOVSR|vex256|vex66|vex0F|vexWIG",
-		"VEX.128.66.0F38.WIG":   "vexNOVSR|vex128|vex66|vex0F38|vexWIG",
-		"VEX.NDS.LIG.F2.0F.WIG": "vexNDS|vexLIG|vexF2|vex0F|vexWIG",
-		"VEX.NDD.LIG.F2.0F.WIG": "vexNDD|vexLIG|vexF2|vex0F|vexWIG",
-		"VEX.DDS.LIG.F2.0F.WIG": "vexDDS|vexLIG|vexF2|vex0F|vexWIG",
-		"VEX.NDS.0F":            "vexNDS|vex0F",
-		"VEX.0F":                "vexNOVSR|vex0F",
-		"VEX.0F.W0":             "vexNOVSR|vex0F|vexW0",
-		"VEX.66.0F.W1":          "vexNOVSR|vex66|vex0F|vexW1",
-	}
-
-	for input, want := range tests {
-		have := vexExpr(input)
-		if have != want {
-			t.Errorf("vexPrefixExpr(%q)\nwant: %#v\nhave: %#v", input, want, have)
-		}
-	}
-}
-
-func TestParseEncoding(t *testing.T) {
-	tests := map[string]encoding{
-		"VEX.LZ.0F38.W0 F2 /r": {
-			vex:    "VEX.LZ.0F38.W0",
-			opbyte: "F2",
-		},
-		"VEX.NDD.LZ.0F38.W0 F3 /1": {
-			vex:     "VEX.NDD.LZ.0F38.W0",
-			opbyte:  "F3",
-			opdigit: "1",
-		},
-		"VEX.NDS.128.66.0F3A.W0 4B /r /is4": {
-			vex:    "VEX.NDS.128.66.0F3A.W0",
-			opbyte: "4B",
-		},
-	}
-
-	for input, want := range tests {
-		have := parseEncoding(input)
-		if have != want {
-			t.Errorf("vexPrefixExpr(%q)\nwant: %#v\nhave: %#v", input, want, have)
-		}
-	}
-}
-
-func TestGenerateAenum(t *testing.T) {
-	input := `// Code generated by x86avxgen; DO NOT EDIT.
-package eax
-
-const (
-	unrelatedOne = iota
-	unrelatedTwo
-)
-
-/*
-Leading multiline comment;
-Line 2.
-*/
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p x86
-const (
-	AAAA = iota + 4*iota // AAAA comment.
-	ACCC                 // ACCC comment.
-	ABBB
-	AFFF  // AFFF comment.
-	ALAST // ALAST comment.
-)
-
-// Top-level floating comment.
-`
-
-	expected := `// Code generated by x86avxgen; DO NOT EDIT.
-package eax
-
-const (
-	unrelatedOne = iota
-	unrelatedTwo
-)
-
-// Top-level floating comment.
-
-/*
-Leading multiline comment;
-Line 2.
-*/
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p x86
-const (
-	AAAA = iota + 4*iota // AAAA comment.
-	ABBB
-	ACCC // ACCC comment.
-	ADDD
-	AEEE
-	AFFF // AFFF comment.
-	AZZZ
-	ALAST // ALAST comment.
-)
-`
-	r := strings.NewReader(input)
-	var buf bytes.Buffer
-	err := generateAenum(r, &buf, []string{
-		"ZZZ",
-		"EEE",
-		"DDD",
-	})
+func newTestContext(t testing.TB) *context {
+	ctx := &context{xedPath: filepath.Join("testdata", "xedpath")}
+	db, err := xeddata.NewDatabase(ctx.xedPath)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("open test database: %v", err)
 	}
-	output := buf.String()
-	if expected != output {
-		t.Errorf("output mismatch:\nwant: %s\nhave: %s",
-			expected, output)
-	}
+	ctx.db = db
+	return ctx
 }
 
-func TestUncommentTestLine(t *testing.T) {
-	// Note that is should also fix XMM0 to X0.
-	input := `
-	//TODO: ADCXL (BX), DX                  // 660f38f613
-	//TODO: ADCXL (R11), DX                 // 66410f38f613
-	//TODO: ADDSUBPD (BX), X2               // 660fd013
-	//TODO: BLENDVPD XMM0, (BX), X2         // 660f381513`
-	want := `
-	ADCXL (BX), DX                          // 660f38f613
-	ADCXL (R11), DX                         // 66410f38f613
-	ADDSUBPD (BX), X2                       // 660fd013
-	BLENDVPD X0, (BX), X2                   // 660f381513`
+func newStringSet(keys ...string) map[string]bool {
+	set := make(map[string]bool)
+	for _, k := range keys {
+		set[k] = true
+	}
+	return set
+}
 
-	lines := strings.Split(input, "\n")
-	for i := range lines {
-		if len(lines[i]) > 10 {
-			lines[i] = uncommentedTestLine(lines[i])
-		}
-	}
-	have := strings.Join(lines, "\n")
-	if want != have {
-		t.Errorf("output mismatch:\nwant: `%s`\nhave: `%s`",
-			want, have)
-	}
+func generateToString(t *testing.T) string {
+	ctx := newTestContext(t)
+	buildTables(ctx)
+	var buf bytes.Buffer
+	writeTables(&buf, ctx)
+	return buf.String()
 }
 
 func TestOutput(t *testing.T) {
-	// Using already existing AVX optabs to check generated output.
-	// This does not cover new instructions though.
+	// Checks ytab lists and optabs output.
+	//
+	// These tests are fragile: slight changes in the generator
+	// output can invalidate them, so it is better to keep the
+	// number of test cases at a minimum.
 
-	// These lines can be retrieved by:
-	// $ grep ', Pvex,' src/cmd/internal/obj/x86/asm6.go
-	existingOptabs := `
-	{AANDNL, yvex_r3, Pvex, [23]uint8{VEX_NDS_LZ_0F38_W0, 0xF2}},
-	{AANDNQ, yvex_r3, Pvex, [23]uint8{VEX_NDS_LZ_0F38_W1, 0xF2}},
-	{ABEXTRL, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_0F38_W0, 0xF7}},
-	{ABEXTRQ, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_0F38_W1, 0xF7}},
-	{ABZHIL, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_0F38_W0, 0xF5}},
-	{ABZHIQ, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_0F38_W1, 0xF5}},
-	{AMULXL, yvex_r3, Pvex, [23]uint8{VEX_NDD_LZ_F2_0F38_W0, 0xF6}},
-	{AMULXQ, yvex_r3, Pvex, [23]uint8{VEX_NDD_LZ_F2_0F38_W1, 0xF6}},
-	{APDEPL, yvex_r3, Pvex, [23]uint8{VEX_NDS_LZ_F2_0F38_W0, 0xF5}},
-	{APDEPQ, yvex_r3, Pvex, [23]uint8{VEX_NDS_LZ_F2_0F38_W1, 0xF5}},
-	{APEXTL, yvex_r3, Pvex, [23]uint8{VEX_NDS_LZ_F3_0F38_W0, 0xF5}},
-	{APEXTQ, yvex_r3, Pvex, [23]uint8{VEX_NDS_LZ_F3_0F38_W1, 0xF5}},
-	{ASARXL, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_F3_0F38_W0, 0xF7}},
-	{ASARXQ, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_F3_0F38_W1, 0xF7}},
-	{ASHLXL, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_66_0F38_W0, 0xF7}},
-	{ASHLXQ, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_66_0F38_W1, 0xF7}},
-	{ASHRXL, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_F2_0F38_W0, 0xF7}},
-	{ASHRXQ, yvex_vmr3, Pvex, [23]uint8{VEX_NDS_LZ_F2_0F38_W1, 0xF7}},
-	{AVMOVDQU, yvex_vmovdqa, Pvex, [23]uint8{VEX_NOVSR_128_F3_0F_WIG, 0x6F, VEX_NOVSR_128_F3_0F_WIG, 0x7F, VEX_NOVSR_256_F3_0F_WIG, 0x6F, VEX_NOVSR_256_F3_0F_WIG, 0x7F}},
-	{AVMOVDQA, yvex_vmovdqa, Pvex, [23]uint8{VEX_NOVSR_128_66_0F_WIG, 0x6F, VEX_NOVSR_128_66_0F_WIG, 0x7F, VEX_NOVSR_256_66_0F_WIG, 0x6F, VEX_NOVSR_256_66_0F_WIG, 0x7F}},
-	{AVMOVNTDQ, yvex_vmovntdq, Pvex, [23]uint8{VEX_NOVSR_128_66_0F_WIG, 0xE7, VEX_NOVSR_256_66_0F_WIG, 0xE7}},
-	{AVPCMPEQB, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0x74, VEX_NDS_256_66_0F_WIG, 0x74}},
-	{AVPXOR, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0xEF, VEX_NDS_256_66_0F_WIG, 0xEF}},
-	{AVPMOVMSKB, yvex_xyr2, Pvex, [23]uint8{VEX_NOVSR_128_66_0F_WIG, 0xD7, VEX_NOVSR_256_66_0F_WIG, 0xD7}},
-	{AVPAND, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0xDB, VEX_NDS_256_66_0F_WIG, 0xDB}},
-	{AVPBROADCASTB, yvex_vpbroadcast, Pvex, [23]uint8{VEX_NOVSR_128_66_0F38_W0, 0x78, VEX_NOVSR_256_66_0F38_W0, 0x78}},
-	{AVPTEST, yvex_xy2, Pvex, [23]uint8{VEX_NOVSR_128_66_0F38_WIG, 0x17, VEX_NOVSR_256_66_0F38_WIG, 0x17}},
-	{AVPSHUFB, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F38_WIG, 0x00, VEX_NDS_256_66_0F38_WIG, 0x00}},
-	{AVPSHUFD, yvex_xyi3, Pvex, [23]uint8{VEX_NOVSR_128_66_0F_WIG, 0x70, VEX_NOVSR_256_66_0F_WIG, 0x70, VEX_NOVSR_128_66_0F_WIG, 0x70, VEX_NOVSR_256_66_0F_WIG, 0x70}},
-	{AVPOR, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0xeb, VEX_NDS_256_66_0F_WIG, 0xeb}},
-	{AVPADDQ, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0xd4, VEX_NDS_256_66_0F_WIG, 0xd4}},
-	{AVPADDD, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0xfe, VEX_NDS_256_66_0F_WIG, 0xfe}},
-	{AVADDSD, yvex_x3, Pvex, [23]uint8{VEX_NDS_128_F2_0F_WIG, 0x58}},
-	{AVSUBSD, yvex_x3, Pvex, [23]uint8{VEX_NDS_128_F2_0F_WIG, 0x5c}},
-	{AVFMADD213SD, yvex_x3, Pvex, [23]uint8{VEX_DDS_LIG_66_0F38_W1, 0xa9}},
-	{AVFMADD231SD, yvex_x3, Pvex, [23]uint8{VEX_DDS_LIG_66_0F38_W1, 0xb9}},
-	{AVFNMADD213SD, yvex_x3, Pvex, [23]uint8{VEX_DDS_LIG_66_0F38_W1, 0xad}},
-	{AVFNMADD231SD, yvex_x3, Pvex, [23]uint8{VEX_DDS_LIG_66_0F38_W1, 0xbd}},
-	{AVPSLLD, yvex_shift, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0x72, 0xf0, VEX_NDS_256_66_0F_WIG, 0x72, 0xf0, VEX_NDD_128_66_0F_WIG, 0xf2, VEX_NDD_256_66_0F_WIG, 0xf2}},
-	{AVPSLLQ, yvex_shift, Pvex, [23]uint8{VEX_NDD_128_66_0F_WIG, 0x73, 0xf0, VEX_NDD_256_66_0F_WIG, 0x73, 0xf0, VEX_NDS_128_66_0F_WIG, 0xf3, VEX_NDS_256_66_0F_WIG, 0xf3}},
-	{AVPSRLD, yvex_shift, Pvex, [23]uint8{VEX_NDD_128_66_0F_WIG, 0x72, 0xd0, VEX_NDD_256_66_0F_WIG, 0x72, 0xd0, VEX_NDD_128_66_0F_WIG, 0xd2, VEX_NDD_256_66_0F_WIG, 0xd2}},
-	{AVPSRLQ, yvex_shift, Pvex, [23]uint8{VEX_NDD_128_66_0F_WIG, 0x73, 0xd0, VEX_NDD_256_66_0F_WIG, 0x73, 0xd0, VEX_NDS_128_66_0F_WIG, 0xd3, VEX_NDS_256_66_0F_WIG, 0xd3}},
-	{AVPSRLDQ, yvex_shift_dq, Pvex, [23]uint8{VEX_NDD_128_66_0F_WIG, 0x73, 0xd8, VEX_NDD_256_66_0F_WIG, 0x73, 0xd8}},
-	{AVPSLLDQ, yvex_shift_dq, Pvex, [23]uint8{VEX_NDD_128_66_0F_WIG, 0x73, 0xf8, VEX_NDD_256_66_0F_WIG, 0x73, 0xf8}},
-	{AVPERM2F128, yvex_yyi4, Pvex, [23]uint8{VEX_NDS_256_66_0F3A_W0, 0x06}},
-	{AVPALIGNR, yvex_yyi4, Pvex, [23]uint8{VEX_NDS_256_66_0F3A_WIG, 0x0f}},
-	{AVPBLENDD, yvex_yyi4, Pvex, [23]uint8{VEX_NDS_256_66_0F3A_WIG, 0x02}},
-	{AVINSERTI128, yvex_xyi4, Pvex, [23]uint8{VEX_NDS_256_66_0F3A_WIG, 0x38}},
-	{AVPERM2I128, yvex_yyi4, Pvex, [23]uint8{VEX_NDS_256_66_0F3A_WIG, 0x46}},
-	{ARORXL, yvex_ri3, Pvex, [23]uint8{VEX_NOVSR_LZ_F2_0F3A_W0, 0xf0}},
-	{ARORXQ, yvex_ri3, Pvex, [23]uint8{VEX_NOVSR_LZ_F2_0F3A_W1, 0xf0}},
-	{AVBROADCASTSD, yvex_vpbroadcast_sd, Pvex, [23]uint8{VEX_NOVSR_256_66_0F38_W0, 0x19}},
-	{AVBROADCASTSS, yvex_vpbroadcast, Pvex, [23]uint8{VEX_NOVSR_128_66_0F38_W0, 0x18, VEX_NOVSR_256_66_0F38_W0, 0x18}},
-	{AVMOVDDUP, yvex_xy2, Pvex, [23]uint8{VEX_NOVSR_128_F2_0F_WIG, 0x12, VEX_NOVSR_256_F2_0F_WIG, 0x12}},
-	{AVMOVSHDUP, yvex_xy2, Pvex, [23]uint8{VEX_NOVSR_128_F3_0F_WIG, 0x16, VEX_NOVSR_256_F3_0F_WIG, 0x16}},
-	{AVMOVSLDUP, yvex_xy2, Pvex, [23]uint8{VEX_NOVSR_128_F3_0F_WIG, 0x12, VEX_NOVSR_256_F3_0F_WIG, 0x12}},`
-
-	// Preprocess "existingOptabs" to make generated output comparable.
+	type testCase struct {
+		opcode     string
+		ytabs      string
+		optabLines string
+	}
+	var testCases []testCase
 	{
-		// 1. Inline prefix byte expressions.
-		//    $ egrep 'VEX_[_A-Z0-9]+\s*=' src/cmd/internal/obj/x86/asm6.go
-		prefixFixer := strings.NewReplacer(
-			"VEX_DDS_LIG_66_0F38_W1", "vexDDS | vexLIG | vex66 | vex0F38 | vexW1",
-			"VEX_NDD_128_66_0F_WIG", "vexNDD | vex128 | vex66 | vex0F | vexWIG",
-			"VEX_NDD_256_66_0F_WIG", "vexNDD | vex256 | vex66 | vex0F | vexWIG",
-			"VEX_NDD_LZ_F2_0F38_W0", "vexNDD | vexLZ | vexF2 | vex0F38 | vexW0",
-			"VEX_NDD_LZ_F2_0F38_W1", "vexNDD | vexLZ | vexF2 | vex0F38 | vexW1",
-			"VEX_NDS_128_66_0F_WIG", "vexNDS | vex128 | vex66 | vex0F | vexWIG",
-			"VEX_NDS_128_66_0F38_WIG", "vexNDS | vex128 | vex66 | vex0F38 | vexWIG",
-			"VEX_NDS_128_F2_0F_WIG", "vexNDS | vex128 | vexF2 | vex0F | vexWIG",
-			"VEX_NDS_256_66_0F_WIG", "vexNDS | vex256 | vex66 | vex0F | vexWIG",
-			"VEX_NDS_256_66_0F38_WIG", "vexNDS | vex256 | vex66 | vex0F38 | vexWIG",
-			"VEX_NDS_256_66_0F3A_W0", "vexNDS | vex256 | vex66 | vex0F3A | vexW0",
-			"VEX_NDS_256_66_0F3A_WIG", "vexNDS | vex256 | vex66 | vex0F3A | vexWIG",
-			"VEX_NDS_LZ_0F38_W0", "vexNDS | vexLZ | vex0F38 | vexW0",
-			"VEX_NDS_LZ_0F38_W1", "vexNDS | vexLZ | vex0F38 | vexW1",
-			"VEX_NDS_LZ_66_0F38_W0", "vexNDS | vexLZ | vex66 | vex0F38 | vexW0",
-			"VEX_NDS_LZ_66_0F38_W1", "vexNDS | vexLZ | vex66 | vex0F38 | vexW1",
-			"VEX_NDS_LZ_F2_0F38_W0", "vexNDS | vexLZ | vexF2 | vex0F38 | vexW0",
-			"VEX_NDS_LZ_F2_0F38_W1", "vexNDS | vexLZ | vexF2 | vex0F38 | vexW1",
-			"VEX_NDS_LZ_F3_0F38_W0", "vexNDS | vexLZ | vexF3 | vex0F38 | vexW0",
-			"VEX_NDS_LZ_F3_0F38_W1", "vexNDS | vexLZ | vexF3 | vex0F38 | vexW1",
-			"VEX_NOVSR_128_66_0F_WIG", "vexNOVSR | vex128 | vex66 | vex0F | vexWIG",
-			"VEX_NOVSR_128_66_0F38_W0", "vexNOVSR | vex128 | vex66 | vex0F38 | vexW0",
-			"VEX_NOVSR_128_66_0F38_WIG", "vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG",
-			"VEX_NOVSR_128_F2_0F_WIG", "vexNOVSR | vex128 | vexF2 | vex0F | vexWIG",
-			"VEX_NOVSR_128_F3_0F_WIG", "vexNOVSR | vex128 | vexF3 | vex0F | vexWIG",
-			"VEX_NOVSR_256_66_0F_WIG", "vexNOVSR | vex256 | vex66 | vex0F | vexWIG",
-			"VEX_NOVSR_256_66_0F38_W0", "vexNOVSR | vex256 | vex66 | vex0F38 | vexW0",
-			"VEX_NOVSR_256_66_0F38_WIG", "vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG",
-			"VEX_NOVSR_256_F2_0F_WIG", "vexNOVSR | vex256 | vexF2 | vex0F | vexWIG",
-			"VEX_NOVSR_256_F3_0F_WIG", "vexNOVSR | vex256 | vexF3 | vex0F | vexWIG",
-			"VEX_NOVSR_LZ_F2_0F3A_W0", "vexNOVSR | vexLZ | vexF2 | vex0F3A | vexW0",
-			"VEX_NOVSR_LZ_F2_0F3A_W1", "vexNOVSR | vexLZ | vexF2 | vex0F3A | vexW1",
-		)
-		existingOptabs = prefixFixer.Replace(existingOptabs)
-
-		// 2. Normalize hex literals.
-		//    Some optabs use 0xaa style, others use 0xAA.
-		//    Generated optabs always use upper case style (as in x86.csv).
-		rxHexLit := regexp.MustCompile(` 0x[0-9a-f]{2}`)
-		existingOptabs = rxHexLit.ReplaceAllStringFunc(existingOptabs, func(m string) string {
-			return " 0x" + strings.ToUpper(m[len(" 0x"):])
-		})
+		opcodeRE := regexp.MustCompile(`as: ([A-Z][A-Z0-9]*)`)
+		data, err := ioutil.ReadFile(filepath.Join("testdata", "golden.txt"))
+		if err != nil {
+			t.Fatalf("read golden file: %v", err)
+		}
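+		// Entries in golden.txt are separated by "======" lines;
+		// within an entry, "----" separates the ytab list from the
+		// optab lines (which contain the "as: AXXX" opcode).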
+		for _, entry := range bytes.Split(data, []byte("======")) {
+			parts := bytes.Split(entry, []byte("----"))
+			ytabs := parts[0]
+			optabLines := parts[1]
+			opcode := opcodeRE.FindSubmatch(optabLines)[1]
+			testCases = append(testCases, testCase{
+				ytabs:      strings.TrimSpace(string(ytabs)),
+				optabLines: strings.TrimSpace(string(optabLines)),
+				opcode:     string(opcode)[len("A"):],
+			})
+		}
 	}
 
-	r, err := specRowReader("../" + specFile)
-	if err != nil {
-		t.Fatalf("open row reader: %v", err)
+	output := generateToString(t)
+	for _, tc := range testCases {
+		if !strings.Contains(output, tc.ytabs) {
+			t.Errorf("%s: ytabs not matched", tc.opcode)
+		}
+		if !strings.Contains(output, tc.optabLines) {
+			t.Errorf("%s: optab lines not matched", tc.opcode)
+		}
 	}
-	var newOptabs bytes.Buffer
-	_, err = doGenerateVexOptabs(r, &newOptabs)
-	if err != nil {
-		t.Fatalf("generate vex optabs: %v", err)
-	}
+}
 
-	rxOptabID := regexp.MustCompile(`[A-Z_][A-Z_0-9]*`)
-	linesToMap := func(lines []string) map[string]string {
-		m := make(map[string]string, len(lines))
-		for _, l := range lines {
-			name := rxOptabID.FindString(l)
-			if name != "" {
-				m[name] = l
+func TestOutputStability(t *testing.T) {
+	// Generate the output count+1 times and check that it is
+	// exactly the same string every time.
+	//
+	// The output must be deterministic to avoid unwanted diffs
+	// between code generation runs.
+	const count = 8
+
+	want := generateToString(t)
+	var wg sync.WaitGroup
+	for i := 0; i < count; i++ {
+		wg.Add(1)
+		go func(i int) {
+			if want != generateToString(t) {
+				t.Errorf("output #%d mismatches", i)
 			}
-		}
-		return m
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+}
+
+func TestOpcodeCoverage(t *testing.T) {
+	// Check that the generator produces all expected opcodes from the testdata files.
+	// All opcodes are in Go syntax.
+
+	// VEX/EVEX opcodes collected from XED-based x86.csv.
+	expectedOpcodes := newStringSet(
+		"ANDNL",
+		"ANDNQ",
+		"BEXTRL",
+		"BEXTRQ",
+		"BLSIL",
+		"BLSIQ",
+		"BLSMSKL",
+		"BLSMSKQ",
+		"BLSRL",
+		"BLSRQ",
+		"BZHIL",
+		"BZHIQ",
+		"KADDB",
+		"KADDD",
+		"KADDQ",
+		"KADDW",
+		"KANDB",
+		"KANDD",
+		"KANDNB",
+		"KANDND",
+		"KANDNQ",
+		"KANDNW",
+		"KANDQ",
+		"KANDW",
+		"KMOVB",
+		"KMOVD",
+		"KMOVQ",
+		"KMOVW",
+		"KNOTB",
+		"KNOTD",
+		"KNOTQ",
+		"KNOTW",
+		"KORB",
+		"KORD",
+		"KORQ",
+		"KORTESTB",
+		"KORTESTD",
+		"KORTESTQ",
+		"KORTESTW",
+		"KORW",
+		"KSHIFTLB",
+		"KSHIFTLD",
+		"KSHIFTLQ",
+		"KSHIFTLW",
+		"KSHIFTRB",
+		"KSHIFTRD",
+		"KSHIFTRQ",
+		"KSHIFTRW",
+		"KTESTB",
+		"KTESTD",
+		"KTESTQ",
+		"KTESTW",
+		"KUNPCKBW",
+		"KUNPCKDQ",
+		"KUNPCKWD",
+		"KXNORB",
+		"KXNORD",
+		"KXNORQ",
+		"KXNORW",
+		"KXORB",
+		"KXORD",
+		"KXORQ",
+		"KXORW",
+		"MULXL",
+		"MULXQ",
+		"PDEPL",
+		"PDEPQ",
+		"PEXTL",
+		"PEXTQ",
+		"RORXL",
+		"RORXQ",
+		"SARXL",
+		"SARXQ",
+		"SHLXL",
+		"SHLXQ",
+		"SHRXL",
+		"SHRXQ",
+		"V4FMADDPS",
+		"V4FMADDSS",
+		"V4FNMADDPS",
+		"V4FNMADDSS",
+		"VADDPD",
+		"VADDPS",
+		"VADDSD",
+		"VADDSS",
+		"VADDSUBPD",
+		"VADDSUBPS",
+		"VAESDEC",
+		"VAESDECLAST",
+		"VAESENC",
+		"VAESENCLAST",
+		"VAESIMC",
+		"VAESKEYGENASSIST",
+		"VALIGND",
+		"VALIGNQ",
+		"VANDNPD",
+		"VANDNPS",
+		"VANDPD",
+		"VANDPS",
+		"VBLENDMPD",
+		"VBLENDMPS",
+		"VBLENDPD",
+		"VBLENDPS",
+		"VBLENDVPD",
+		"VBLENDVPS",
+		"VBROADCASTF128",
+		"VBROADCASTF32X2",
+		"VBROADCASTF32X4",
+		"VBROADCASTF32X8",
+		"VBROADCASTF64X2",
+		"VBROADCASTF64X4",
+		"VBROADCASTI128",
+		"VBROADCASTI32X2",
+		"VBROADCASTI32X4",
+		"VBROADCASTI32X8",
+		"VBROADCASTI64X2",
+		"VBROADCASTI64X4",
+		"VBROADCASTSD",
+		"VBROADCASTSS",
+		"VCMPPD",
+		"VCMPPS",
+		"VCMPSD",
+		"VCMPSS",
+		"VCOMISD",
+		"VCOMISS",
+		"VCOMPRESSPD",
+		"VCOMPRESSPS",
+		"VCVTDQ2PD",
+		"VCVTDQ2PS",
+		"VCVTPD2DQ",
+		"VCVTPD2DQX",
+		"VCVTPD2DQY",
+		"VCVTPD2PS",
+		"VCVTPD2PSX",
+		"VCVTPD2PSY",
+		"VCVTPD2QQ",
+		"VCVTPD2UDQ",
+		"VCVTPD2UDQX",
+		"VCVTPD2UDQY",
+		"VCVTPD2UQQ",
+		"VCVTPH2PS",
+		"VCVTPS2DQ",
+		"VCVTPS2PD",
+		"VCVTPS2PH",
+		"VCVTPS2QQ",
+		"VCVTPS2UDQ",
+		"VCVTPS2UQQ",
+		"VCVTQQ2PD",
+		"VCVTQQ2PS",
+		"VCVTQQ2PSX",
+		"VCVTQQ2PSY",
+		"VCVTSD2SI",
+		"VCVTSD2SIQ",
+		"VCVTSD2SS",
+		"VCVTSD2USIL",
+		"VCVTSD2USIQ",
+		"VCVTSI2SDL",
+		"VCVTSI2SDQ",
+		"VCVTSI2SSL",
+		"VCVTSI2SSQ",
+		"VCVTSS2SD",
+		"VCVTSS2SI",
+		"VCVTSS2SIQ",
+		"VCVTSS2USIL",
+		"VCVTSS2USIQ",
+		"VCVTTPD2DQ",
+		"VCVTTPD2DQX",
+		"VCVTTPD2DQY",
+		"VCVTTPD2QQ",
+		"VCVTTPD2UDQ",
+		"VCVTTPD2UDQX",
+		"VCVTTPD2UDQY",
+		"VCVTTPD2UQQ",
+		"VCVTTPS2DQ",
+		"VCVTTPS2QQ",
+		"VCVTTPS2UDQ",
+		"VCVTTPS2UQQ",
+		"VCVTTSD2SI",
+		"VCVTTSD2SIQ",
+		"VCVTTSD2USIL",
+		"VCVTTSD2USIQ",
+		"VCVTTSS2SI",
+		"VCVTTSS2SIQ",
+		"VCVTTSS2USIL",
+		"VCVTTSS2USIQ",
+		"VCVTUDQ2PD",
+		"VCVTUDQ2PS",
+		"VCVTUQQ2PD",
+		"VCVTUQQ2PS",
+		"VCVTUQQ2PSX",
+		"VCVTUQQ2PSY",
+		"VCVTUSI2SDL",
+		"VCVTUSI2SDQ",
+		"VCVTUSI2SSL",
+		"VCVTUSI2SSQ",
+		"VDBPSADBW",
+		"VDIVPD",
+		"VDIVPS",
+		"VDIVSD",
+		"VDIVSS",
+		"VDPPD",
+		"VDPPS",
+		"VEXP2PD",
+		"VEXP2PS",
+		"VEXPANDPD",
+		"VEXPANDPS",
+		"VEXTRACTF128",
+		"VEXTRACTF32X4",
+		"VEXTRACTF32X8",
+		"VEXTRACTF64X2",
+		"VEXTRACTF64X4",
+		"VEXTRACTI128",
+		"VEXTRACTI32X4",
+		"VEXTRACTI32X8",
+		"VEXTRACTI64X2",
+		"VEXTRACTI64X4",
+		"VEXTRACTPS",
+		"VFIXUPIMMPD",
+		"VFIXUPIMMPS",
+		"VFIXUPIMMSD",
+		"VFIXUPIMMSS",
+		"VFMADD132PD",
+		"VFMADD132PS",
+		"VFMADD132SD",
+		"VFMADD132SS",
+		"VFMADD213PD",
+		"VFMADD213PS",
+		"VFMADD213SD",
+		"VFMADD213SS",
+		"VFMADD231PD",
+		"VFMADD231PS",
+		"VFMADD231SD",
+		"VFMADD231SS",
+		"VFMADDPD",
+		"VFMADDPS",
+		"VFMADDSD",
+		"VFMADDSS",
+		"VFMADDSUB132PD",
+		"VFMADDSUB132PS",
+		"VFMADDSUB213PD",
+		"VFMADDSUB213PS",
+		"VFMADDSUB231PD",
+		"VFMADDSUB231PS",
+		"VFMADDSUBPD",
+		"VFMADDSUBPS",
+		"VFMSUB132PD",
+		"VFMSUB132PS",
+		"VFMSUB132SD",
+		"VFMSUB132SS",
+		"VFMSUB213PD",
+		"VFMSUB213PS",
+		"VFMSUB213SD",
+		"VFMSUB213SS",
+		"VFMSUB231PD",
+		"VFMSUB231PS",
+		"VFMSUB231SD",
+		"VFMSUB231SS",
+		"VFMSUBADD132PD",
+		"VFMSUBADD132PS",
+		"VFMSUBADD213PD",
+		"VFMSUBADD213PS",
+		"VFMSUBADD231PD",
+		"VFMSUBADD231PS",
+		"VFMSUBADDPD",
+		"VFMSUBADDPS",
+		"VFMSUBPD",
+		"VFMSUBPS",
+		"VFMSUBSD",
+		"VFMSUBSS",
+		"VFNMADD132PD",
+		"VFNMADD132PS",
+		"VFNMADD132SD",
+		"VFNMADD132SS",
+		"VFNMADD213PD",
+		"VFNMADD213PS",
+		"VFNMADD213SD",
+		"VFNMADD213SS",
+		"VFNMADD231PD",
+		"VFNMADD231PS",
+		"VFNMADD231SD",
+		"VFNMADD231SS",
+		"VFNMADDPD",
+		"VFNMADDPS",
+		"VFNMADDSD",
+		"VFNMADDSS",
+		"VFNMSUB132PD",
+		"VFNMSUB132PS",
+		"VFNMSUB132SD",
+		"VFNMSUB132SS",
+		"VFNMSUB213PD",
+		"VFNMSUB213PS",
+		"VFNMSUB213SD",
+		"VFNMSUB213SS",
+		"VFNMSUB231PD",
+		"VFNMSUB231PS",
+		"VFNMSUB231SD",
+		"VFNMSUB231SS",
+		"VFNMSUBPD",
+		"VFNMSUBPS",
+		"VFNMSUBSD",
+		"VFNMSUBSS",
+		"VFPCLASSPDX",
+		"VFPCLASSPDY",
+		"VFPCLASSPDZ",
+		"VFPCLASSPSX",
+		"VFPCLASSPSY",
+		"VFPCLASSPSZ",
+		"VFPCLASSSD",
+		"VFPCLASSSS",
+		"VGATHERDPD",
+		"VGATHERDPS",
+		"VGATHERPF0DPD",
+		"VGATHERPF0DPS",
+		"VGATHERPF0QPD",
+		"VGATHERPF0QPS",
+		"VGATHERPF1DPD",
+		"VGATHERPF1DPS",
+		"VGATHERPF1QPD",
+		"VGATHERPF1QPS",
+		"VGATHERQPD",
+		"VGATHERQPS",
+		"VGETEXPPD",
+		"VGETEXPPS",
+		"VGETEXPSD",
+		"VGETEXPSS",
+		"VGETMANTPD",
+		"VGETMANTPS",
+		"VGETMANTSD",
+		"VGETMANTSS",
+		"VGF2P8AFFINEINVQB",
+		"VGF2P8AFFINEQB",
+		"VGF2P8MULB",
+		"VHADDPD",
+		"VHADDPS",
+		"VHSUBPD",
+		"VHSUBPS",
+		"VINSERTF128",
+		"VINSERTF32X4",
+		"VINSERTF32X8",
+		"VINSERTF64X2",
+		"VINSERTF64X4",
+		"VINSERTI128",
+		"VINSERTI32X4",
+		"VINSERTI32X8",
+		"VINSERTI64X2",
+		"VINSERTI64X4",
+		"VINSERTPS",
+		"VLDDQU",
+		"VLDMXCSR",
+		"VMASKMOVDQU",
+		"VMASKMOVPD",
+		"VMASKMOVPS",
+		"VMAXPD",
+		"VMAXPS",
+		"VMAXSD",
+		"VMAXSS",
+		"VMINPD",
+		"VMINPS",
+		"VMINSD",
+		"VMINSS",
+		"VMOVAPD",
+		"VMOVAPS",
+		"VMOVD",
+		"VMOVDDUP",
+		"VMOVDQA",
+		"VMOVDQA32",
+		"VMOVDQA64",
+		"VMOVDQU",
+		"VMOVDQU16",
+		"VMOVDQU32",
+		"VMOVDQU64",
+		"VMOVDQU8",
+		"VMOVHLPS",
+		"VMOVHPD",
+		"VMOVHPS",
+		"VMOVLHPS",
+		"VMOVLPD",
+		"VMOVLPS",
+		"VMOVMSKPD",
+		"VMOVMSKPS",
+		"VMOVNTDQ",
+		"VMOVNTDQA",
+		"VMOVNTPD",
+		"VMOVNTPS",
+		"VMOVQ",
+		"VMOVSD",
+		"VMOVSHDUP",
+		"VMOVSLDUP",
+		"VMOVSS",
+		"VMOVUPD",
+		"VMOVUPS",
+		"VMPSADBW",
+		"VMULPD",
+		"VMULPS",
+		"VMULSD",
+		"VMULSS",
+		"VORPD",
+		"VORPS",
+		"VP4DPWSSD",
+		"VP4DPWSSDS",
+		"VPABSB",
+		"VPABSD",
+		"VPABSQ",
+		"VPABSW",
+		"VPACKSSDW",
+		"VPACKSSWB",
+		"VPACKUSDW",
+		"VPACKUSWB",
+		"VPADDB",
+		"VPADDD",
+		"VPADDQ",
+		"VPADDSB",
+		"VPADDSW",
+		"VPADDUSB",
+		"VPADDUSW",
+		"VPADDW",
+		"VPALIGNR",
+		"VPAND",
+		"VPANDD",
+		"VPANDN",
+		"VPANDND",
+		"VPANDNQ",
+		"VPANDQ",
+		"VPAVGB",
+		"VPAVGW",
+		"VPBLENDD",
+		"VPBLENDMB",
+		"VPBLENDMD",
+		"VPBLENDMQ",
+		"VPBLENDMW",
+		"VPBLENDVB",
+		"VPBLENDW",
+		"VPBROADCASTB",
+		"VPBROADCASTD",
+		"VPBROADCASTMB2Q",
+		"VPBROADCASTMW2D",
+		"VPBROADCASTQ",
+		"VPBROADCASTW",
+		"VPCLMULQDQ",
+		"VPCMPB",
+		"VPCMPD",
+		"VPCMPEQB",
+		"VPCMPEQD",
+		"VPCMPEQQ",
+		"VPCMPEQW",
+		"VPCMPESTRI",
+		"VPCMPESTRM",
+		"VPCMPGTB",
+		"VPCMPGTD",
+		"VPCMPGTQ",
+		"VPCMPGTW",
+		"VPCMPISTRI",
+		"VPCMPISTRM",
+		"VPCMPQ",
+		"VPCMPUB",
+		"VPCMPUD",
+		"VPCMPUQ",
+		"VPCMPUW",
+		"VPCMPW",
+		"VPCOMPRESSB",
+		"VPCOMPRESSD",
+		"VPCOMPRESSQ",
+		"VPCOMPRESSW",
+		"VPCONFLICTD",
+		"VPCONFLICTQ",
+		"VPDPBUSD",
+		"VPDPBUSDS",
+		"VPDPWSSD",
+		"VPDPWSSDS",
+		"VPERM2F128",
+		"VPERM2I128",
+		"VPERMB",
+		"VPERMD",
+		"VPERMI2B",
+		"VPERMI2D",
+		"VPERMI2PD",
+		"VPERMI2PS",
+		"VPERMI2Q",
+		"VPERMI2W",
+		"VPERMIL2PD",
+		"VPERMIL2PS",
+		"VPERMILPD",
+		"VPERMILPS",
+		"VPERMPD",
+		"VPERMPS",
+		"VPERMQ",
+		"VPERMT2B",
+		"VPERMT2D",
+		"VPERMT2PD",
+		"VPERMT2PS",
+		"VPERMT2Q",
+		"VPERMT2W",
+		"VPERMW",
+		"VPEXPANDB",
+		"VPEXPANDD",
+		"VPEXPANDQ",
+		"VPEXPANDW",
+		"VPEXTRB",
+		"VPEXTRD",
+		"VPEXTRQ",
+		"VPEXTRW",
+		"VPGATHERDD",
+		"VPGATHERDQ",
+		"VPGATHERQD",
+		"VPGATHERQQ",
+		"VPHADDD",
+		"VPHADDSW",
+		"VPHADDW",
+		"VPHMINPOSUW",
+		"VPHSUBD",
+		"VPHSUBSW",
+		"VPHSUBW",
+		"VPINSRB",
+		"VPINSRD",
+		"VPINSRQ",
+		"VPINSRW",
+		"VPLZCNTD",
+		"VPLZCNTQ",
+		"VPMADD52HUQ",
+		"VPMADD52LUQ",
+		"VPMADDUBSW",
+		"VPMADDWD",
+		"VPMASKMOVD",
+		"VPMASKMOVQ",
+		"VPMAXSB",
+		"VPMAXSD",
+		"VPMAXSQ",
+		"VPMAXSW",
+		"VPMAXUB",
+		"VPMAXUD",
+		"VPMAXUQ",
+		"VPMAXUW",
+		"VPMINSB",
+		"VPMINSD",
+		"VPMINSQ",
+		"VPMINSW",
+		"VPMINUB",
+		"VPMINUD",
+		"VPMINUQ",
+		"VPMINUW",
+		"VPMOVB2M",
+		"VPMOVD2M",
+		"VPMOVDB",
+		"VPMOVDW",
+		"VPMOVM2B",
+		"VPMOVM2D",
+		"VPMOVM2Q",
+		"VPMOVM2W",
+		"VPMOVMSKB",
+		"VPMOVQ2M",
+		"VPMOVQB",
+		"VPMOVQD",
+		"VPMOVQW",
+		"VPMOVSDB",
+		"VPMOVSDW",
+		"VPMOVSQB",
+		"VPMOVSQD",
+		"VPMOVSQW",
+		"VPMOVSWB",
+		"VPMOVSXBD",
+		"VPMOVSXBQ",
+		"VPMOVSXBW",
+		"VPMOVSXDQ",
+		"VPMOVSXWD",
+		"VPMOVSXWQ",
+		"VPMOVUSDB",
+		"VPMOVUSDW",
+		"VPMOVUSQB",
+		"VPMOVUSQD",
+		"VPMOVUSQW",
+		"VPMOVUSWB",
+		"VPMOVW2M",
+		"VPMOVWB",
+		"VPMOVZXBD",
+		"VPMOVZXBQ",
+		"VPMOVZXBW",
+		"VPMOVZXDQ",
+		"VPMOVZXWD",
+		"VPMOVZXWQ",
+		"VPMULDQ",
+		"VPMULHRSW",
+		"VPMULHUW",
+		"VPMULHW",
+		"VPMULLD",
+		"VPMULLQ",
+		"VPMULLW",
+		"VPMULTISHIFTQB",
+		"VPMULUDQ",
+		"VPOPCNTB",
+		"VPOPCNTD",
+		"VPOPCNTQ",
+		"VPOPCNTW",
+		"VPOR",
+		"VPORD",
+		"VPORQ",
+		"VPROLD",
+		"VPROLQ",
+		"VPROLVD",
+		"VPROLVQ",
+		"VPRORD",
+		"VPRORQ",
+		"VPRORVD",
+		"VPRORVQ",
+		"VPSADBW",
+		"VPSCATTERDD",
+		"VPSCATTERDQ",
+		"VPSCATTERQD",
+		"VPSCATTERQQ",
+		"VPSHLDD",
+		"VPSHLDQ",
+		"VPSHLDVD",
+		"VPSHLDVQ",
+		"VPSHLDVW",
+		"VPSHLDW",
+		"VPSHRDD",
+		"VPSHRDQ",
+		"VPSHRDVD",
+		"VPSHRDVQ",
+		"VPSHRDVW",
+		"VPSHRDW",
+		"VPSHUFB",
+		"VPSHUFBITQMB",
+		"VPSHUFD",
+		"VPSHUFHW",
+		"VPSHUFLW",
+		"VPSIGNB",
+		"VPSIGND",
+		"VPSIGNW",
+		"VPSLLD",
+		"VPSLLDQ",
+		"VPSLLQ",
+		"VPSLLVD",
+		"VPSLLVQ",
+		"VPSLLVW",
+		"VPSLLW",
+		"VPSRAD",
+		"VPSRAQ",
+		"VPSRAVD",
+		"VPSRAVQ",
+		"VPSRAVW",
+		"VPSRAW",
+		"VPSRLD",
+		"VPSRLDQ",
+		"VPSRLQ",
+		"VPSRLVD",
+		"VPSRLVQ",
+		"VPSRLVW",
+		"VPSRLW",
+		"VPSUBB",
+		"VPSUBD",
+		"VPSUBQ",
+		"VPSUBSB",
+		"VPSUBSW",
+		"VPSUBUSB",
+		"VPSUBUSW",
+		"VPSUBW",
+		"VPTERNLOGD",
+		"VPTERNLOGQ",
+		"VPTEST",
+		"VPTESTMB",
+		"VPTESTMD",
+		"VPTESTMQ",
+		"VPTESTMW",
+		"VPTESTNMB",
+		"VPTESTNMD",
+		"VPTESTNMQ",
+		"VPTESTNMW",
+		"VPUNPCKHBW",
+		"VPUNPCKHDQ",
+		"VPUNPCKHQDQ",
+		"VPUNPCKHWD",
+		"VPUNPCKLBW",
+		"VPUNPCKLDQ",
+		"VPUNPCKLQDQ",
+		"VPUNPCKLWD",
+		"VPXOR",
+		"VPXORD",
+		"VPXORQ",
+		"VRANGEPD",
+		"VRANGEPS",
+		"VRANGESD",
+		"VRANGESS",
+		"VRCP14PD",
+		"VRCP14PS",
+		"VRCP14SD",
+		"VRCP14SS",
+		"VRCP28PD",
+		"VRCP28PS",
+		"VRCP28SD",
+		"VRCP28SS",
+		"VRCPPS",
+		"VRCPSS",
+		"VREDUCEPD",
+		"VREDUCEPS",
+		"VREDUCESD",
+		"VREDUCESS",
+		"VRNDSCALEPD",
+		"VRNDSCALEPS",
+		"VRNDSCALESD",
+		"VRNDSCALESS",
+		"VROUNDPD",
+		"VROUNDPS",
+		"VROUNDSD",
+		"VROUNDSS",
+		"VRSQRT14PD",
+		"VRSQRT14PS",
+		"VRSQRT14SD",
+		"VRSQRT14SS",
+		"VRSQRT28PD",
+		"VRSQRT28PS",
+		"VRSQRT28SD",
+		"VRSQRT28SS",
+		"VRSQRTPS",
+		"VRSQRTSS",
+		"VSCALEFPD",
+		"VSCALEFPS",
+		"VSCALEFSD",
+		"VSCALEFSS",
+		"VSCATTERDPD",
+		"VSCATTERDPS",
+		"VSCATTERPF0DPD",
+		"VSCATTERPF0DPS",
+		"VSCATTERPF0QPD",
+		"VSCATTERPF0QPS",
+		"VSCATTERPF1DPD",
+		"VSCATTERPF1DPS",
+		"VSCATTERPF1QPD",
+		"VSCATTERPF1QPS",
+		"VSCATTERQPD",
+		"VSCATTERQPS",
+		"VSHUFF32X4",
+		"VSHUFF64X2",
+		"VSHUFI32X4",
+		"VSHUFI64X2",
+		"VSHUFPD",
+		"VSHUFPS",
+		"VSQRTPD",
+		"VSQRTPS",
+		"VSQRTSD",
+		"VSQRTSS",
+		"VSTMXCSR",
+		"VSUBPD",
+		"VSUBPS",
+		"VSUBSD",
+		"VSUBSS",
+		"VTESTPD",
+		"VTESTPS",
+		"VUCOMISD",
+		"VUCOMISS",
+		"VUNPCKHPD",
+		"VUNPCKHPS",
+		"VUNPCKLPD",
+		"VUNPCKLPS",
+		"VXORPD",
+		"VXORPS",
+		"VZEROALL",
+		"VZEROUPPER")
+
+	// AMD-specific VEX opcodes.
+	// Excluded from x86avxgen output for now.
+	amdOpcodes := newStringSet(
+		"VFMADDPD",
+		"VFMADDPS",
+		"VFMADDSD",
+		"VFMADDSS",
+		"VFMADDSUBPD",
+		"VFMADDSUBPS",
+		"VFMSUBADDPD",
+		"VFMSUBADDPS",
+		"VFMSUBPD",
+		"VFMSUBPS",
+		"VFMSUBSD",
+		"VFMSUBSS",
+		"VFNMADDPD",
+		"VFNMADDPS",
+		"VFNMADDSD",
+		"VFNMADDSS",
+		"VFNMSUBPD",
+		"VFNMSUBPS",
+		"VFNMSUBSD",
+		"VFNMSUBSS",
+		"VPERMIL2PD",
+		"VPERMIL2PS")
+
+	ctx := newTestContext(t)
+	buildTables(ctx)
+
+	for op := range amdOpcodes {
+		delete(expectedOpcodes, op)
+	}
+	for op := range ctx.optabs {
+		delete(expectedOpcodes, op)
 	}
 
-	expectedChanges := map[string]string{
-		// Before: 256/Y variant.
-		// Now:    256/Y + 128/X variants.
-		"AVPALIGNR": "{AVPALIGNR, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x0F, vexNDS | vex256 | vex66 | vex0F3A | vexWIG, 0x0F}}",
-		"AVPBLENDD": "{AVPBLENDD, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW0, 0x02, vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x02}}",
-
-		// Before: R+R variants.
-		// Now:    R+R and R+M variants.
-		"AVBROADCASTSS": "{AVBROADCASTSS, yvex_vpbroadcast_ss, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x18, vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x18, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x18, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x18}}",
-		"AVBROADCASTSD": "{AVBROADCASTSD, yvex_vpbroadcast_sd, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x19, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x19}}",
-
-		// Before: VEX.L=128 (vex128).
-		// Now:    VEX.L=IGNORE (vexLIG); as in "x86.csv".
-		"AVSUBSD": "{AVSUBSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x5C}}",
-		"AVADDSD": "{AVADDSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x58}}",
-
-		// Before: VEX.W=IGNORE (vexWIG).
-		// Now:    VEX.W=W0 (vexW0); as in "x86.csv".
-		"AVINSERTI128": "{AVINSERTI128, yvex_xyi4, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x38}}",
-		"AVPERM2I128":  "{AVPERM2I128, yvex_yyi4, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x46}}",
-	}
-
-	reportError := func(name, want, have string) {
-		t.Errorf("%s: output mismatch\n\twant:'%s'\n\thave:'%s'",
-			name, want, have)
-	}
-
-	// Perform check.
-	haveLines := linesToMap(strings.Split(newOptabs.String(), "\n"))
-	wantLines := linesToMap(strings.Split(existingOptabs, "\n"))
-	for name, wantLine := range wantLines {
-		haveLine := haveLines[name]
-
-		haveLine = strings.Trim(haveLine, " \t,")
-		wantLine = strings.Trim(wantLine, " \t,")
-
-		if haveLine == "" {
-			t.Errorf("%s: not found", name)
-			continue
-		}
-
-		if line := expectedChanges[name]; line != "" {
-			if line != haveLine {
-				reportError(name, line, haveLine)
-			}
-			continue
-		}
-
-		if !strings.Contains(haveLine, wantLine) {
-			reportError(name, wantLine, haveLine)
-		}
+	for op := range expectedOpcodes {
+		t.Errorf("missing opcode: %s", op)
 	}
 }
diff --git a/x86/x86avxgen/config.go b/x86/x86avxgen/config.go
deleted file mode 100644
index c4cd0d4..0000000
--- a/x86/x86avxgen/config.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import "path/filepath"
-
-const (
-	progName = "x86avxgen"
-	specFile = "x86.v0.2.csv" // Default spec filename
-
-	// Paths are relative to GOROOT.
-	pathVexOptabs = "src/cmd/internal/obj/x86/vex_optabs.go"
-	pathAenum     = "src/cmd/internal/obj/x86/aenum.go"
-	pathAnames    = "src/cmd/internal/obj/x86/anames.go"
-	pathTests     = "src/cmd/asm/internal/asm/testdata/amd64enc.s"
-)
-
-var (
-	filenameVexOptabs = filepath.Base(pathVexOptabs)
-	filenameAenum     = filepath.Base(pathAenum)
-	filenameAnames    = filepath.Base(pathAnames)
-	filenameTests     = filepath.Base(pathTests)
-)
diff --git a/x86/x86avxgen/decode.go b/x86/x86avxgen/decode.go
new file mode 100644
index 0000000..667f77c
--- /dev/null
+++ b/x86/x86avxgen/decode.go
@@ -0,0 +1,395 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+	"strings"
+
+	"golang.org/x/arch/x86/xeddata"
+)
+
+// encoding is a decoded XED instruction pattern.
+type encoding struct {
+	// opbyte is the opcode byte (the one that follows the [E]VEX prefix).
+	// It is called "opcode" in the Intel manual, but we use that term
+	// for the instruction name (iclass in XED terms).
+	opbyte string
+
+	// opdigit is the ModRM.Reg field used to encode an opcode extension.
+	// The Intel manual uses the "/digit" notation for it.
+	opdigit string
+
+	// vex represents the [E]VEX fields that are used in the first [E]VEX
+	// opBytes element (see the prefixExpr function).
+	vex struct {
+		P string // 66/F2/F3
+		L string // 128/256/512
+		M string // 0F/0F38/0F3A
+		W string // W0/W1
+	}
+
+	// evexScale is the scaling factor used to calculate the compressed
+	// disp8 (disp8*N in Intel manual terms).
+	evexScale string
+
+	// evexBcstScale is like evexScale, but used during broadcasting.
+	// Empty for optab entries that do not have broadcasting support.
+	evexBcstScale string
+
+	// evex describes which EVEX features can be used by the optab entry.
+	// All flags are false for VEX-encoded instructions.
+	evex struct {
+		// There is no "broadcast" flag because it's inferred
+		// from non-empty evexBcstScale.
+
+		SAE      bool // EVEX.b controls SAE for reg-reg insts
+		Rounding bool // EVEX.b + EVEX.RC (VL) control rounding for FP insts
+		Zeroing  bool // Instruction can use zeroing.
+	}
+}
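+
+// Example (illustrative values, not taken from the XED files): an entry
+// with evexScale "evexN16" uses disp8*N compression, so a memory
+// displacement of 32 bytes is stored as disp8 = 32/16 = 2.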
+
+type decoder struct {
+	ctx   *context
+	insts []*instruction
+}
+
+// decodeGroups fills ctx.groups with decoded instruction groups.
+//
+// Reads XED objects from ctx.xedPath.
+func decodeGroups(ctx *context) {
+	d := decoder{ctx: ctx}
+	groups := make(map[string][]*instruction)
+	for _, inst := range d.DecodeAll() {
+		groups[inst.opcode] = append(groups[inst.opcode], inst)
+	}
+	for op, insts := range groups {
+		ctx.groups = append(ctx.groups, &instGroup{
+			opcode: op,
+			list:   insts,
+		})
+	}
+}
+
+// DecodeAll decodes every XED instruction.
+func (d *decoder) DecodeAll() []*instruction {
+	err := xeddata.WalkInsts(d.ctx.xedPath, func(inst *xeddata.Inst) {
+		inst.Pattern = xeddata.ExpandStates(d.ctx.db, inst.Pattern)
+		pset := xeddata.NewPatternSet(inst.Pattern)
+
+		opcode := inst.Iclass
+
+		switch {
+		case inst.HasAttribute("AMDONLY") || inst.Extension == "XOP":
+			return // Only VEX and EVEX are supported
+		case !pset.Is("VEX") && !pset.Is("EVEX"):
+			return // Skip non-AVX instructions
+		case inst.RealOpcode == "N":
+			return // Skip unstable instructions
+		}
+
+		// Expand some patterns to simplify decodePattern.
+		pset.Replace("FIX_ROUND_LEN128()", "VL=0")
+		pset.Replace("FIX_ROUND_LEN512()", "VL=2")
+
+		mask, args := d.decodeArgs(pset, inst)
+		d.insts = append(d.insts, &instruction{
+			pset:   pset,
+			opcode: opcode,
+			mask:   mask,
+			args:   args,
+			enc:    d.decodePattern(pset, inst),
+		})
+	})
+	if err != nil {
+		log.Fatalf("walk insts: %v", err)
+	}
+	return d.insts
+}
+
+// registerArgs maps XED argument name RHS to its decoded version.
+var registerArgs = map[string]argument{
+	"GPR32_R()":  {"Yrl", "reg"},
+	"GPR64_R()":  {"Yrl", "reg"},
+	"VGPR32_R()": {"Yrl", "reg"},
+	"VGPR64_R()": {"Yrl", "reg"},
+	"VGPR32_N()": {"Yrl", "regV"},
+	"VGPR64_N()": {"Yrl", "regV"},
+	"GPR32_B()":  {"Yrl", "reg/mem"},
+	"GPR64_B()":  {"Yrl", "reg/mem"},
+	"VGPR32_B()": {"Yrl", "reg/mem"},
+	"VGPR64_B()": {"Yrl", "reg/mem"},
+
+	"XMM_R()":  {"Yxr", "reg"},
+	"XMM_R3()": {"YxrEvex", "reg"},
+	"XMM_N()":  {"Yxr", "regV"},
+	"XMM_N3()": {"YxrEvex", "regV"},
+	"XMM_B()":  {"Yxr", "reg/mem"},
+	"XMM_B3()": {"YxrEvex", "reg/mem"},
+	"XMM_SE()": {"Yxr", "regIH"},
+
+	"YMM_R()":  {"Yyr", "reg"},
+	"YMM_R3()": {"YyrEvex", "reg"},
+	"YMM_N()":  {"Yyr", "regV"},
+	"YMM_N3()": {"YyrEvex", "regV"},
+	"YMM_B()":  {"Yyr", "reg/mem"},
+	"YMM_B3()": {"YyrEvex", "reg/mem"},
+	"YMM_SE()": {"Yyr", "regIH"},
+
+	"ZMM_R3()": {"Yzr", "reg"},
+	"ZMM_N3()": {"Yzr", "regV"},
+	"ZMM_B3()": {"Yzr", "reg/mem"},
+
+	"MASK_R()": {"Yk", "reg"},
+	"MASK_N()": {"Yk", "regV"},
+	"MASK_B()": {"Yk", "reg/mem"},
+
+	"MASKNOT0()": {"Yknot0", "kmask"},
+
+	// Handled specially in "generate".
+	"MASK1()": {"MASK1()", "MASK1()"},
+}
+
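+// decodeArgs decodes the XED operands string into the optional mask
+// argument (the one bound to MASK1(), if any) and the list of visible
+// arguments, reversed into Go operand order.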
+func (d *decoder) decodeArgs(pset xeddata.PatternSet, inst *xeddata.Inst) (mask *argument, args []*argument) {
+	for i, f := range strings.Fields(inst.Operands) {
+		xarg, err := xeddata.NewOperand(d.ctx.db, f)
+		if err != nil {
+			log.Fatalf("%s: args[%d]: %v", inst, i, err)
+		}
+
+		switch {
+		case xarg.Action == "":
+			continue // Skip meta args like EMX_BROADCAST_1TO32_8
+		case !xarg.IsVisible():
+			continue
+		}
+
+		arg := &argument{}
+		args = append(args, arg)
+
+		switch xarg.NameLHS() {
+		case "IMM0":
+			if xarg.Width != "b" {
+				log.Fatalf("%s: args[%d]: expected width=b, found %s", inst, i, xarg.Width)
+			}
+			if pset["IMM0SIGNED=1"] {
+				arg.ytype = "Yi8"
+			} else {
+				arg.ytype = "Yu8"
+			}
+			arg.zkind = "imm8"
+
+		case "REG0", "REG1", "REG2", "REG3":
+			rhs := xarg.NameRHS()
+			if rhs == "MASK1()" {
+				mask = arg
+			}
+			*arg = registerArgs[rhs]
+			if arg.ytype == "" {
+				log.Fatalf("%s: args[%d]: unexpected %s reg", inst, i, rhs)
+			}
+			if xarg.Attributes["MULTISOURCE4"] {
+				arg.ytype += "Multi4"
+			}
+
+		case "MEM0":
+			arg.ytype = pset.MatchOrDefault("Ym",
+				"VMODRM_XMM()", "Yxvm",
+				"VMODRM_YMM()", "Yyvm",
+				"UISA_VMODRM_XMM()", "YxvmEvex",
+				"UISA_VMODRM_YMM()", "YyvmEvex",
+				"UISA_VMODRM_ZMM()", "Yzvm",
+			)
+			arg.zkind = "reg/mem"
+
+		default:
+			log.Fatalf("%s: args[%d]: unexpected %s", inst, i, xarg.NameRHS())
+		}
+	}
+
+	// Reverse args: XED lists operands in Intel (destination-first)
+	// order, while Go assembly uses the reverse order.
+	for i := len(args)/2 - 1; i >= 0; i-- {
+		j := len(args) - 1 - i
+		args[i], args[j] = args[j], args[i]
+	}
+
+	return mask, args
+}
+
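+// decodePattern decodes the [E]VEX encoding fields from the instruction
+// pattern set. For example, a pattern set that contains VEX_PREFIX=1,
+// MAP=1, VL=1 and REXW=0 yields vex fields {P: "66", M: "0F", L: "256",
+// W: "W0"} (before the vex/evex prefixing step below).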
+func (d *decoder) decodePattern(pset xeddata.PatternSet, inst *xeddata.Inst) *encoding {
+	var enc encoding
+
+	enc.opdigit = d.findOpdigit(pset)
+	enc.opbyte = d.findOpbyte(pset, inst)
+
+	if strings.Contains(inst.Attributes, "DISP8_") {
+		enc.evexScale = d.findEVEXScale(pset)
+		enc.evexBcstScale = d.findEVEXBcstScale(pset, inst)
+	}
+
+	enc.vex.P = pset.Match(
+		"VEX_PREFIX=1", "66",
+		"VEX_PREFIX=2", "F2",
+		"VEX_PREFIX=3", "F3")
+	enc.vex.M = pset.Match(
+		"MAP=1", "0F",
+		"MAP=2", "0F38",
+		"MAP=3", "0F3A")
+	enc.vex.L = pset.MatchOrDefault("128",
+		"VL=0", "128",
+		"VL=1", "256",
+		"VL=2", "512")
+	enc.vex.W = pset.MatchOrDefault("W0",
+		"REXW=0", "W0",
+		"REXW=1", "W1")
+
+	if pset.Is("EVEX") {
+		enc.evex.SAE = strings.Contains(inst.Operands, "TXT=SAESTR")
+		enc.evex.Rounding = strings.Contains(inst.Operands, "TXT=ROUNDC")
+		enc.evex.Zeroing = strings.Contains(inst.Operands, "TXT=ZEROSTR")
+	}
+
+	// Prefix each non-empty part with vex or evex.
+	parts := [...]*string{
+		&enc.evexScale, &enc.evexBcstScale,
+		&enc.vex.P, &enc.vex.M, &enc.vex.L, &enc.vex.W,
+	}
+	for _, p := range parts {
+		if *p == "" {
+			continue
+		}
+		if pset.Is("EVEX") {
+			*p = "evex" + *p
+		} else {
+			*p = "vex" + *p
+		}
+	}
+
+	return &enc
+}
+
+func (d *decoder) findOpdigit(pset xeddata.PatternSet) string {
+	reg := pset.Index(
+		"REG[0b000]",
+		"REG[0b001]",
+		"REG[0b010]",
+		"REG[0b011]",
+		"REG[0b100]",
+		"REG[0b101]",
+		"REG[0b110]",
+		"REG[0b111]",
+	)
+	// A fixed ModRM.Reg field means that it is used as an opcode extension.
+	if reg != -1 {
+		return fmt.Sprintf("0%d", reg)
+	}
+	return ""
+}
+
+// opbyteRE matches a uint8 hex literal.
+var opbyteRE = regexp.MustCompile(`0x[0-9A-F]{2}`)
+
+func (d *decoder) findOpbyte(pset xeddata.PatternSet, inst *xeddata.Inst) string {
+	opbyte := ""
+	for k := range pset {
+		if opbyteRE.MatchString(k) {
+			if opbyte == "" {
+				opbyte = k
+			} else {
+				log.Fatalf("%s: multiple opbytes", inst)
+			}
+		}
+	}
+	return opbyte
+}
+
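+// findEVEXScale maps the XED tuple and element-size nonterminals to the
+// disp8 scaling factor. For example, a NELEM_FULL() tuple at VL=2
+// (512-bit vector) scales by N64, so each disp8 unit covers 64 bytes.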
+func (d *decoder) findEVEXScale(pset xeddata.PatternSet) string {
+	switch {
+	case pset["NELEM_FULL()"], pset["NELEM_FULLMEM()"]:
+		return pset.Match(
+			"VL=0", "N16",
+			"VL=1", "N32",
+			"VL=2", "N64")
+	case pset["NELEM_MOVDDUP()"]:
+		return pset.Match(
+			"VL=0", "N8",
+			"VL=1", "N32",
+			"VL=2", "N64")
+	case pset["NELEM_HALF()"], pset["NELEM_HALFMEM()"]:
+		return pset.Match(
+			"VL=0", "N8",
+			"VL=1", "N16",
+			"VL=2", "N32")
+	case pset["NELEM_QUARTERMEM()"]:
+		return pset.Match(
+			"VL=0", "N4",
+			"VL=1", "N8",
+			"VL=2", "N16")
+	case pset["NELEM_EIGHTHMEM()"]:
+		return pset.Match(
+			"VL=0", "N2",
+			"VL=1", "N4",
+			"VL=2", "N8")
+	case pset["NELEM_TUPLE2()"]:
+		return pset.Match(
+			"ESIZE_32_BITS()", "N8",
+			"ESIZE_64_BITS()", "N16")
+	case pset["NELEM_TUPLE4()"]:
+		return pset.Match(
+			"ESIZE_32_BITS()", "N16",
+			"ESIZE_64_BITS()", "N32")
+	case pset["NELEM_TUPLE8()"]:
+		return "N32"
+	case pset["NELEM_MEM128()"], pset["NELEM_TUPLE1_4X()"]:
+		return "N16"
+	}
+
+	// An explicit list is required so that the caller can detect
+	// unhandled nonterminals (they produce an empty result).
+	scalars := [...]string{
+		"NELEM_SCALAR()",
+		"NELEM_GSCAT()",
+		"NELEM_GPR_READER()",
+		"NELEM_GPR_READER_BYTE()",
+		"NELEM_GPR_READER_WORD()",
+		"NELEM_GPR_WRITER_STORE()",
+		"NELEM_GPR_WRITER_STORE_BYTE()",
+		"NELEM_GPR_WRITER_STORE_WORD()",
+		"NELEM_GPR_WRITER_LDOP_D()",
+		"NELEM_GPR_WRITER_LDOP_Q()",
+		"NELEM_TUPLE1()",
+		"NELEM_TUPLE1_BYTE()",
+		"NELEM_TUPLE1_WORD()",
+	}
+	for _, scalar := range scalars {
+		if pset[scalar] {
+			return pset.Match(
+				"ESIZE_8_BITS()", "N1",
+				"ESIZE_16_BITS()", "N2",
+				"ESIZE_32_BITS()", "N4",
+				"ESIZE_64_BITS()", "N8")
+		}
+	}
+
+	return ""
+}
+
+func (d *decoder) findEVEXBcstScale(pset xeddata.PatternSet, inst *xeddata.Inst) string {
+	// Only FULL and HALF tuples are affected by broadcasting.
+	switch {
+	case pset["NELEM_FULL()"]:
+		return pset.Match(
+			"ESIZE_32_BITS()", "BcstN4",
+			"ESIZE_64_BITS()", "BcstN8")
+	case pset["NELEM_HALF()"]:
+		return "BcstN4"
+	default:
+		if inst.HasAttribute("BROADCAST_ENABLED") {
+			log.Fatalf("%s: unexpected tuple for bcst", inst)
+		}
+		return ""
+	}
+}
diff --git a/x86/x86avxgen/diag.go b/x86/x86avxgen/diag.go
deleted file mode 100644
index 18e7c11..0000000
--- a/x86/x86avxgen/diag.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-// These diagnostics were extensively used during development phase.
-// Now they serve as additional level of tests.
-// If coverage is not 100% for any reason, troubleshooting is required.
-
-import (
-	"fmt"
-	"sort"
-)
-
-// diagnostics is used to collect and display execution info.
-type diagnostics struct {
-	// Count misses for undefined ytab key.
-	ytabMisses      map[string]int
-	optabsGenerated int
-	optabsTotal     int
-}
-
-func (d *diagnostics) Print() {
-	fmt.Println("  -- diag info --")
-	d.printOptabsInfo()
-	fmt.Println()
-	d.printYtabMisses()
-}
-
-func (d *diagnostics) printOptabsInfo() {
-	skipped := d.optabsTotal - d.optabsGenerated
-	cover := float64(d.optabsGenerated*100) / float64(d.optabsTotal)
-	fmt.Println("Optabs info:")
-	fmt.Printf("  processed: %d\n", d.optabsTotal)
-	fmt.Printf("  generated: %d\n", d.optabsGenerated)
-	fmt.Printf("    skipped: %d\n", skipped)
-	fmt.Printf("      cover: %.1f%%\n", cover)
-}
-
-func (d *diagnostics) printYtabMisses() {
-	if len(d.ytabMisses) == 0 {
-		fmt.Println("No ytab key misses recorded")
-		return
-	}
-
-	// Sort by miss count.
-	type ytabMiss struct {
-		key   string
-		count int
-	}
-	misses := make([]ytabMiss, 0, len(d.ytabMisses))
-	for key, count := range d.ytabMisses {
-		misses = append(misses, ytabMiss{
-			key:   key,
-			count: count,
-		})
-	}
-	sort.Slice(misses, func(i, j int) bool {
-		return misses[i].count > misses[j].count
-	})
-
-	fmt.Println("Missed ytab keys:")
-	for _, m := range misses {
-		fmt.Printf("  %+40s = %d\n", m.key, m.count)
-	}
-}
-
-var diag = diagnostics{
-	ytabMisses: make(map[string]int),
-}
diff --git a/x86/x86avxgen/generate.go b/x86/x86avxgen/generate.go
new file mode 100644
index 0000000..14985cb
--- /dev/null
+++ b/x86/x86avxgen/generate.go
@@ -0,0 +1,255 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"bytes"
+	"log"
+	"strings"
+)
+
+// ytab is a ytabList element.
+type ytab struct {
+	Zcase   string
+	Zoffset int
+	ArgList string // Ytypes that are matched by this ytab.
+}
+
+// ytabList is a named set of ytab objects.
+// In asm6.go it is represented as []ytab.
+type ytabList struct {
+	Name  string
+	Ytabs []ytab
+}
+
+// optab describes instruction encodings for a specific opcode.
+type optab struct {
+	Opcode   string
+	YtabList *ytabList
+	OpLines  []string
+}
+
+type generator struct {
+	ctx       *context
+	ytabLists map[string]*ytabList
+}
+
+// generateOptabs fills ctx.optabs and ctx.ytabLists with objects created
+// from decoded instructions.
+func generateOptabs(ctx *context) {
+	gen := generator{ctx: ctx, ytabLists: make(map[string]*ytabList)}
+	optabs := make(map[string]*optab)
+	for _, g := range ctx.groups {
+		optabs[g.opcode] = gen.GenerateGroup(g)
+	}
+	ctx.optabs = optabs
+	ctx.ytabLists = gen.ytabLists
+}
+
+// GenerateGroup converts g into optab.
+// Populates internal ytab list map.
+func (gen *generator) GenerateGroup(g *instGroup) *optab {
+	var opLines []string
+	for _, inst := range g.list {
+		opLines = append(opLines, gen.generateOpLine(inst))
+	}
+	return &optab{
+		Opcode:   "A" + g.opcode,
+		OpLines:  opLines,
+		YtabList: gen.internYtabList(g),
+	}
+}
+
+// generateOpLine returns a string that describes opBytes for a single instruction form.
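+// For a VEX instruction the result looks like
+// "avxEscape|vex256|vex66|vex0F|vexW0, 0x58" (an illustrative VADDPD form).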
+func (gen *generator) generateOpLine(inst *instruction) string {
+	parts := []string{gen.prefixExpr(inst)}
+	if inst.pset.Is("EVEX") {
+		parts = append(parts, gen.evexPrefixExpr(inst))
+	}
+	parts = append(parts, inst.enc.opbyte)
+	if inst.enc.opdigit != "" {
+		parts = append(parts, inst.enc.opdigit)
+	}
+	return strings.Join(parts, ", ")
+}
+
+func (gen *generator) prefixExpr(inst *instruction) string {
+	enc := inst.enc
+	return gen.joinPrefixParts([]string{
+		// Special constant that makes the AVX escape byte different
+		// from 0x0F, so there is no need to check for both VEX and
+		// EVEX when dealing with legacy instructions that skip it
+		// without advancing the "z" counter.
+		"avxEscape",
+		enc.vex.L,
+		enc.vex.P,
+		enc.vex.M,
+		enc.vex.W,
+	})
+}
+
+func (gen *generator) evexPrefixExpr(inst *instruction) string {
+	enc := inst.enc
+	parts := []string{
+		enc.evexScale,
+		enc.evexBcstScale,
+	}
+	if enc.evex.SAE {
+		parts = append(parts, "evexSaeEnabled")
+	}
+	if enc.evex.Rounding {
+		parts = append(parts, "evexRoundingEnabled")
+	}
+	if enc.evex.Zeroing {
+		parts = append(parts, "evexZeroingEnabled")
+	}
+	return gen.joinPrefixParts(parts)
+}
+
+// joinPrefixParts returns a Go OR-expression that joins every non-empty name.
+// If every name is empty, it returns "0".
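+// For example, {"avxEscape", "", "vex128"} yields "avxEscape|vex128".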
+func (gen *generator) joinPrefixParts(names []string) string {
+	filterEmptyStrings := func(xs []string) []string {
+		ys := xs[:0]
+		for _, x := range xs {
+			if x != "" {
+				ys = append(ys, x)
+			}
+		}
+		return ys
+	}
+
+	names = filterEmptyStrings(names)
+	if len(names) == 0 {
+		return "0"
+	}
+	return strings.Join(names, "|")
+}
+
+// internYtabList returns the ytabList for the given group.
+//
+// Returned ytab lists are interned:
+// the same ytab list can be returned for different groups.
+func (gen *generator) internYtabList(g *instGroup) *ytabList {
+	var key string
+	{
+		var buf bytes.Buffer
+		for _, inst := range g.list {
+			buf.WriteString(inst.zform)
+			buf.WriteByte('=')
+			buf.WriteString(inst.YtypeListString())
+			buf.WriteByte(';')
+		}
+		key = buf.String()
+	}
+	if ylist := gen.ytabLists[key]; ylist != nil {
+		return ylist
+	}
+
+	var ytabs []ytab
+	for _, inst := range g.list {
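+		// zoffset counts the opBytes elements consumed by this ytab:
+		// the [E]VEX prefix expression plus the opcode byte, with one
+		// extra element for the EVEX prefix part and one for the opdigit.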
+		zoffset := 2
+		if inst.pset.Is("EVEX") {
+			zoffset++ // Always at least 3 bytes
+		}
+		if inst.enc.opdigit != "" {
+			zoffset++
+		}
+
+		if inst.mask != nil {
+			ytabs = append(ytabs, gen.makeMaskYtabs(zoffset, inst)...)
+		} else {
+			ytabs = append(ytabs, gen.makeYtab(zoffset, inst.zform, inst.args))
+		}
+	}
+	ylist := &ytabList{
+		Name:  "_y" + strings.ToLower(g.opcode),
+		Ytabs: ytabs,
+	}
+	gen.ytabLists[key] = ylist
+	return ylist
+}
+
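+// zcaseByZform maps an instruction zform (its argument layout, prefixed
+// with "evex" for EVEX-encoded forms) to the asm6 Z-case that encodes it.
+// For example, the VEX form "reg regV reg/mem" maps to Zvex_r_v_rm.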
+var zcaseByZform = map[string]string{
+	"evex imm8 reg kmask reg/mem":          "Zevex_i_r_k_rm",
+	"evex imm8 reg reg/mem":                "Zevex_i_r_rm",
+	"evex imm8 reg/mem kmask reg":          "Zevex_i_rm_k_r",
+	"evex imm8 reg/mem kmask regV opdigit": "Zevex_i_rm_k_vo",
+	"evex imm8 reg/mem reg":                "Zevex_i_rm_r",
+	"evex imm8 reg/mem regV opdigit":       "Zevex_i_rm_vo",
+	"evex imm8 reg/mem regV kmask reg":     "Zevex_i_rm_v_k_r",
+	"evex imm8 reg/mem regV reg":           "Zevex_i_rm_v_r",
+	"evex kmask reg/mem opdigit":           "Zevex_k_rmo",
+	"evex reg kmask reg/mem":               "Zevex_r_k_rm",
+	"evex reg reg/mem":                     "Zevex_r_v_rm",
+	"evex reg regV kmask reg/mem":          "Zevex_r_v_k_rm",
+	"evex reg regV reg/mem":                "Zevex_r_v_rm",
+	"evex reg/mem kmask reg":               "Zevex_rm_k_r",
+	"evex reg/mem reg":                     "Zevex_rm_v_r",
+	"evex reg/mem regV kmask reg":          "Zevex_rm_v_k_r",
+	"evex reg/mem regV reg":                "Zevex_rm_v_r",
+
+	"":                          "Zvex",
+	"imm8 reg reg/mem":          "Zvex_i_r_rm",
+	"imm8 reg/mem reg":          "Zvex_i_rm_r",
+	"imm8 reg/mem regV opdigit": "Zvex_i_rm_vo",
+	"imm8 reg/mem regV reg":     "Zvex_i_rm_v_r",
+	"reg reg/mem":               "Zvex_r_v_rm",
+	"reg regV reg/mem":          "Zvex_r_v_rm",
+	"reg/mem opdigit":           "Zvex_rm_v_ro",
+	"reg/mem reg":               "Zvex_rm_v_r",
+	"reg/mem regV opdigit":      "Zvex_rm_r_vo",
+	"reg/mem regV reg":          "Zvex_rm_v_r",
+	"reg/mem":                   "Zvex_rm_v_r",
+	"regIH reg/mem regV reg":    "Zvex_hr_rm_v_r",
+	"regV reg/mem reg":          "Zvex_v_rm_r",
+}
+
+func (gen *generator) makeYtab(zoffset int, zform string, args []*argument) ytab {
+	var ytypes []string
+	for _, arg := range args {
+		if arg.ytype != "Ynone" {
+			ytypes = append(ytypes, arg.ytype)
+		}
+	}
+	argList := strings.Join(ytypes, ", ")
+	zcase := zcaseByZform[zform]
+	if zcase == "" {
+		log.Fatalf("no zcase for %q", zform)
+	}
+	return ytab{
+		Zcase:   zcase,
+		Zoffset: zoffset,
+		ArgList: argList,
+	}
+}
+
+// makeMaskYtabs returns 2 ytabs created from an instruction with a MASK1() argument.
+//
+// This is required due to how masking is implemented in asm6.
+// A single MASK1() instruction produces 2 ytabs, for example:
+//	1. OP xmm, mem     | Yxr, Yxm         | Does not permit K arguments (K0 implied)
+//	2. OP xmm, K2, mem | Yxr, Yknot0, Yxm | Does not permit K0 argument
+//
+// This function also exploits the fact that both ytab entries share
+// the same opbytes, so it is enough to emit a single opbytes line and
+// a zero Z-offset for the first ytab object.
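+//
+// For example (cf. testdata/golden.txt, VADDPD):
+//	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}},
+//	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}},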
+func (gen *generator) makeMaskYtabs(zoffset int, inst *instruction) []ytab {
+	var k0 ytab
+	{
+		zform := strings.Replace(inst.zform, "MASK1() ", "", 1)
+		inst.mask.ytype = "Ynone"
+		k0 = gen.makeYtab(0, zform, inst.args)
+	}
+	var knot0 ytab
+	{
+		zform := strings.Replace(inst.zform, "MASK1() ", "kmask ", 1)
+		inst.mask.ytype = "Yknot0"
+		knot0 = gen.makeYtab(zoffset, zform, inst.args)
+	}
+
+	inst.mask.ytype = "MASK1()" // Restore Y-type
+	return []ytab{k0, knot0}
+}
diff --git a/x86/x86avxgen/generate_aenum.go b/x86/x86avxgen/generate_aenum.go
deleted file mode 100644
index 9462b55..0000000
--- a/x86/x86avxgen/generate_aenum.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"go/ast"
-	"go/format"
-	"go/parser"
-	"go/token"
-	"io"
-	"io/ioutil"
-	"sort"
-)
-
-// generateAenum generates instruction ID enumeration.
-// Adds elements from newNames if they are not already there.
-// Output enum entries are sorted by their name (except ALAST
-// which is always the last element).
-//
-// Reader - old/current "aenum.go" contents provider.
-// Writer - new "aenum.go" contents consumer.
-//
-// Reads r to examine current A-enum (instruction IDs prefixed with "A")
-// file contents. Updated contents are written to w.
-func generateAenum(r io.Reader, w io.Writer, newNames []string) error {
-	f, fset, err := parseFile(r)
-	if err != nil {
-		return err
-	}
-
-	decl := removeAenumDecl(f)
-	if decl == nil {
-		return errors.New(filenameAenum + " missing AXXX const decl clause")
-	}
-	last := decl.Specs[len(decl.Specs)-1]
-	decl.Specs = decl.Specs[:len(decl.Specs)-1] // Drop "ALAST".
-	for _, name := range newNames {
-		decl.Specs = append(decl.Specs, &ast.ValueSpec{
-			Names: []*ast.Ident{{Name: "A" + name}},
-		})
-	}
-	sort.Slice(decl.Specs, func(i, j int) bool {
-		x, y := decl.Specs[i].(*ast.ValueSpec), decl.Specs[j].(*ast.ValueSpec)
-		return x.Names[0].Name < y.Names[0].Name
-	})
-	decl.Specs = append(decl.Specs, last)
-
-	// Reset nodes positions.
-	for _, spec := range decl.Specs {
-		spec := spec.(*ast.ValueSpec)
-		resetPos(spec)
-		if spec.Doc != nil {
-			return fmt.Errorf("%s: doc comments are not supported", spec.Names[0].Name)
-		}
-		if spec.Comment != nil {
-			resetPos(spec.Comment)
-		}
-	}
-
-	var buf bytes.Buffer
-	format.Node(&buf, fset, f)
-	buf.WriteByte('\n')
-	format.Node(&buf, fset, decl)
-
-	// Additional formatting call is needed to make
-	// whitespace gofmt-compliant.
-	prettyCode, err := format.Source(buf.Bytes())
-	if err != nil {
-		return err
-	}
-	w.Write(prettyCode)
-
-	return nil
-}
-
-// removeAenumDecl searches AXXX constand decl and removes it from f.
-// Associated comments are also removed.
-// Returns AXXX declaration or nil, if it was not found.
-func removeAenumDecl(f *ast.File) *ast.GenDecl {
-	for i, decl := range f.Decls {
-		decl, ok := decl.(*ast.GenDecl)
-		if !ok {
-			continue
-		}
-		if decl.Tok != token.CONST {
-			continue
-		}
-		// AXXX enum is distinguished by trailing ALAST.
-		last := decl.Specs[len(decl.Specs)-1].(*ast.ValueSpec)
-		if len(last.Names) == 1 && last.Names[0].Name == "ALAST" {
-			// Remove comments.
-			blacklist := make(map[*ast.CommentGroup]bool)
-			if decl.Doc != nil {
-				blacklist[decl.Doc] = true
-			}
-			for _, spec := range decl.Specs {
-				spec := spec.(*ast.ValueSpec)
-				if spec.Doc != nil {
-					blacklist[spec.Doc] = true
-				}
-				if spec.Comment != nil {
-					blacklist[spec.Comment] = true
-				}
-			}
-			comments := f.Comments[:0]
-			for _, c := range f.Comments {
-				if !blacklist[c] {
-					comments = append(comments, c)
-				}
-			}
-			f.Comments = comments
-			// Remove decl itself.
-			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
-
-			return decl
-		}
-	}
-
-	return nil
-}
-
-// reset node position info.
-func resetPos(node ast.Node) {
-	switch node := node.(type) {
-	case *ast.CommentGroup:
-		node.List[0].Slash = 0
-	case *ast.ValueSpec:
-		node.Names[0].NamePos = 0
-	default:
-		panic(fmt.Sprintf("can't reset pos for %T", node))
-	}
-}
-
-// parseFile parses file that is identified by specified path.
-func parseFile(r io.Reader) (*ast.File, *token.FileSet, error) {
-	src, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, nil, err
-	}
-	fset := token.NewFileSet()
-	mode := parser.ParseComments
-	f, err := parser.ParseFile(fset, filenameAenum, src, mode)
-	if err != nil {
-		return nil, nil, err
-	}
-	return f, fset, nil
-}
diff --git a/x86/x86avxgen/instruction.go b/x86/x86avxgen/instruction.go
new file mode 100644
index 0000000..a20d141
--- /dev/null
+++ b/x86/x86avxgen/instruction.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"strings"
+
+	"golang.org/x/arch/x86/xeddata"
+)
+
+// argument describes a single instruction operand's properties.
+type argument struct {
+	// ytype is argument class as returned by asm6 "oclass" function.
+	ytype string
+
+	// zkind is a partial Z-case matcher.
+	// Determines which Z-case handles the encoding of instruction.
+	zkind string
+}
+
+// instruction is a decoded XED instruction.
+// It is used to produce ytabs and optabs in later phases.
+type instruction struct {
+	// opcode is instruction symbolic name.
+	opcode string
+
+	pset xeddata.PatternSet
+	enc  *encoding
+
+	// mask is the EVEX K-register argument; it points to an element
+	// of args and is used to emit Yk0+Yknot0 table entries.
+	// Nil for VEX-encoded insts.
+	mask *argument
+	args []*argument
+
+	// zform is a pattern that determines which encoder Z-case is used.
+	// We store zform instead of zcase directly because it's further
+	// expanded during optabs generation.
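+	// Example: "evex reg/mem regV reg".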
+	zform string
+}
+
+// String returns a short printed representation of inst (its opcode).
+func (inst *instruction) String() string { return inst.opcode }
+
+// YtypeListString joins the Y-types of all arguments with spaces and returns the result.
+func (inst *instruction) YtypeListString() string {
+	var parts []string
+	for _, arg := range inst.args {
+		parts = append(parts, arg.ytype)
+	}
+	return strings.Join(parts, " ")
+}
+
+// ArgIndexByZkind returns the index of the first argument with the given zkind, or -1.
+func (inst *instruction) ArgIndexByZkind(zkind string) int {
+	for i, arg := range inst.args {
+		if arg.zkind == zkind {
+			return i
+		}
+	}
+	return -1
+}
diff --git a/x86/x86avxgen/main.go b/x86/x86avxgen/main.go
index be57570..9fdf262 100644
--- a/x86/x86avxgen/main.go
+++ b/x86/x86avxgen/main.go
@@ -1,318 +1,361 @@
-// Copyright 2017 The Go Authors. All rights reserved.
+// Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// X86avxgen generates Go code for obj/x86 that adds AVX instructions support.
-//
-// Currently supports only AVX1 and AVX2 instructions.
-// When x86.csv will contain AVX512 instructions and
-// asm6.go is patched to support them,
-// this program can be extended to generate the remainder.
-//
-// The output consists of multiple files:
-// - cmd/internal/obj/x86/aenum.go
-//	Add enum entries for new instructions.
-// - cmd/internal/obj/x86/anames.go
-//	Add new instruction names.
-// - cmd/internal/obj/x86/vex_optabs.go
-//	Add new instruction optabs.
-// - cmd/asm/internal/asm/testdata/amd64enc.s
-//	Uncomment tests for added instructions.
-//
-// Usage:
-//	x86avxgen -goroot=$DEV_GOROOT [-csv=x86.csv] [-output=x86avxgen-output]
-// $DEV_GOROOT is a path to Go repository working tree root.
-//
-// To get precise usage information, call this program without arguments.
 package main
 
 import (
-	"bufio"
-	"bytes"
-	"errors"
 	"flag"
 	"fmt"
-	"go/format"
-	"io"
-	"io/ioutil"
 	"log"
 	"os"
-	"os/exec"
-	"regexp"
+	"sort"
 	"strings"
 
-	"golang.org/x/arch/x86/x86csv"
+	"golang.org/x/arch/x86/xeddata"
 )
 
+// instGroup holds a list of instructions with the same opcode.
+type instGroup struct {
+	opcode string
+	list   []*instruction
+}
+
+// context is x86avxgen program execution state.
+type context struct {
+	db *xeddata.Database
+
+	groups []*instGroup
+
+	optabs    map[string]*optab
+	ytabLists map[string]*ytabList
+
+	// Command line arguments:
+
+	xedPath string
+}
+
 func main() {
-	goroot := flag.String(
-		"goroot", "",
-		"Go sources root path")
-	csv := flag.String(
-		"csv", specFile,
-		"Absolute path to x86spec CSV file")
-	output := flag.String(
-		"output", "x86avxgen-output",
-		"Where to put output files")
-	autopatchEnabled := flag.Bool(
-		"autopatch", false,
-		"Try automatic patching (writes to goroot, unsafe if it is not under VCS)")
-	diagEnabled := flag.Bool(
-		"diag", false,
-		"Print debug information")
+	log.SetPrefix("x86avxgen: ")
+	log.SetFlags(log.Lshortfile)
+
+	var ctx context
+
+	runSteps(&ctx,
+		parseFlags,
+		openDatabase,
+		buildTables,
+		printTables)
+}
+
+func buildTables(ctx *context) {
+	// Order of steps is significant.
+	runSteps(ctx,
+		decodeGroups,
+		mergeRegMem,
+		addGoSuffixes,
+		mergeWIG,
+		assignZforms,
+		sortGroups,
+		generateOptabs)
+}
+
+func runSteps(ctx *context, steps ...func(*context)) {
+	for _, f := range steps {
+		f(ctx)
+	}
+}
+
+func parseFlags(ctx *context) {
+	flag.StringVar(&ctx.xedPath, "xedPath", "./xedpath",
+		"XED datafiles location")
+
 	flag.Parse()
-	if len(os.Args) == 1 {
-		fmt.Printf("%s: x86 AVX ytab generator", progName)
-		flag.Usage()
-		os.Exit(1)
-	}
-	if *goroot == "" {
-		log.Fatal("goroot arg is mandatory")
-	}
-	if _, err := os.Stat(*csv); os.IsNotExist(err) {
-		log.Fatalf("spec file %s not found", *csv)
-	}
-
-	r, err := specRowReader(*csv)
-	if err != nil {
-		log.Fatal(err)
-	}
-	if err := os.MkdirAll(*output, 0755); err != nil {
-		log.Fatal(err)
-	}
-
-	opcodes, err := doGenerateVexOptabs(r, mustOpenFile(*output+"/"+filenameVexOptabs))
-	if err != nil {
-		log.Fatal(err)
-	}
-	if err := doGenerateAenum(*goroot, *output, opcodes); err != nil {
-		log.Fatal(err)
-	}
-	if err := doGenerateAnames(*output); err != nil {
-		log.Fatal(err)
-	}
-	if err := doGenerateTests(*goroot, *output, opcodes); err != nil {
-		log.Fatal(err)
-	}
-
-	if *autopatchEnabled {
-		if err := doAutopatch(*goroot, *output); err != nil {
-			log.Fatal(err)
-		}
-	}
-
-	if *diagEnabled {
-		diag.Print()
-	}
 }
 
-func mustOpenFile(path string) *os.File {
-	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+func openDatabase(ctx *context) {
+	db, err := xeddata.NewDatabase(ctx.xedPath)
 	if err != nil {
-		log.Fatal(err)
+		log.Fatalf("open database: %v", err)
 	}
-	return f
+	ctx.db = db
 }
 
-// filterVEX removes all non-VEX instructions from insts.
-// Returns updates slice.
-func filterVEX(insts []*x86csv.Inst) []*x86csv.Inst {
-	vexInsts := insts[:0]
-	for _, inst := range insts {
-		// Checking CPUID for AVX is not good enough
-		// in this case, because some instructions
-		// have VEX prefix, but no AVX CPUID flag.
-		if strings.HasPrefix(inst.Encoding, "VEX.") {
-			vexInsts = append(vexInsts, inst)
-		}
-	}
-	return vexInsts
-}
-
-func doGenerateVexOptabs(r *x86csv.Reader, w io.Writer) (opcodes []string, err error) {
-	insts, err := r.ReadAll()
-	if err != nil {
-		return nil, err
-	}
-	insts = filterVEX(insts)
-
-	var buf bytes.Buffer
-
-	visitOptab := func(o optab) {
-		diag.optabsGenerated++
-
-		opcodes = append(opcodes, o.as)
-
-		tmpl := "\t{A%s, %s, Pvex, [23]uint8{%s}},\n"
-		fmt.Fprintf(&buf, tmpl, o.as, o.ytabID, strings.Join(o.op, ","))
+// mergeRegMem merges reg-only with mem-only instructions.
+// For example: {MOVQ reg, mem} + {MOVQ reg, reg} = {MOVQ reg, reg/mem}.
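+//
+// Merge candidates are matched on everything except the MOD field:
+// argument count, opcode byte, opdigit and the VEX P/L/M/W parts.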
+func mergeRegMem(ctx *context) {
+	mergeKey := func(inst *instruction) string {
+		return strings.Join([]string{
+			fmt.Sprint(len(inst.args)),
+			inst.enc.opbyte,
+			inst.enc.opdigit,
+			inst.enc.vex.P,
+			inst.enc.vex.L,
+			inst.enc.vex.M,
+			inst.enc.vex.W,
+		}, " ")
 	}
 
-	doGroups(insts, func(op string, insts []*x86csv.Inst) {
-		diag.optabsTotal++
-
-		if ot, ok := precomputedOptabs[op]; ok {
-			log.Printf("notice: using precomputed %s optab", op)
-			visitOptab(ot)
-			return
-		}
-
-		key := ytabKey(op, insts)
-		ytabID := ytabMap[key]
-		if ytabID == "" {
-			diag.ytabMisses[key]++
-			log.Printf("warning: skip %s: no ytabID for '%s' key", op, key)
-			return
-		}
-		var encParts []string
-		for _, inst := range insts {
-			enc := parseEncoding(inst.Encoding)
-
-			encParts = append(encParts, vexExpr(enc.vex))
-			encParts = append(encParts, "0x"+enc.opbyte)
-			if enc.opdigit != "" {
-				encParts = append(encParts, "0"+enc.opdigit)
+	for _, g := range ctx.groups {
+		regOnly := make(map[string]*instruction)
+		memOnly := make(map[string]*instruction)
+		list := g.list[:0]
+		for _, inst := range g.list {
+			switch {
+			case inst.pset.Is("RegOnly"):
+				regOnly[mergeKey(inst)] = inst
+			case inst.pset.Is("MemOnly"):
+				memOnly[mergeKey(inst)] = inst
+			default:
+				if len(inst.args) == 0 {
+					list = append(list, inst)
+					continue
+				}
+				log.Fatalf("%s: unexpected MOD value", inst)
 			}
 		}
-		visitOptab(optab{
-			as:     op,
-			ytabID: ytabID,
-			op:     encParts,
-		})
+
+		for k, m := range memOnly {
+			r := regOnly[k]
+			if r != nil {
+				index := m.ArgIndexByZkind("reg/mem")
+				arg := m.args[index]
+				switch ytype := r.args[index].ytype; ytype {
+				case "Yrl":
+					arg.ytype = "Yml"
+				case "Yxr":
+					arg.ytype = "Yxm"
+				case "YxrEvex":
+					arg.ytype = "YxmEvex"
+				case "Yyr":
+					arg.ytype = "Yym"
+				case "YyrEvex":
+					arg.ytype = "YymEvex"
+				case "Yzr":
+					arg.ytype = "Yzm"
+				case "Yk":
+					arg.ytype = "Ykm"
+				default:
+					log.Fatalf("%s: unexpected register type: %s", r, ytype)
+				}
+				// Merge EVEX flags into m.
+				m.enc.evex.SAE = m.enc.evex.SAE || r.enc.evex.SAE
+				m.enc.evex.Rounding = m.enc.evex.Rounding || r.enc.evex.Rounding
+				m.enc.evex.Zeroing = m.enc.evex.Zeroing || r.enc.evex.Zeroing
+				delete(regOnly, k)
+			}
+			list = append(list, m)
+		}
+		for _, r := range regOnly {
+			list = append(list, r)
+		}
+
+		g.list = list
+	}
+}
+
+// mergeWIG merges [E]VEX.W0 + [E]VEX.W1 into [E]VEX.WIG.
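+//
+// Two forms are merged only if they are identical in everything
+// except the W bit; the W0 form is then rewritten as WIG and the
+// W1 form is dropped.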
+func mergeWIG(ctx *context) {
+	mergeKey := func(inst *instruction) string {
+		return strings.Join([]string{
+			fmt.Sprint(len(inst.args)),
+			inst.enc.opbyte,
+			inst.enc.opdigit,
+			inst.enc.vex.P,
+			inst.enc.vex.L,
+			inst.enc.vex.M,
+		}, " ")
+	}
+
+	for _, g := range ctx.groups {
+		w0map := make(map[string]*instruction)
+		w1map := make(map[string]*instruction)
+		list := g.list[:0]
+		for _, inst := range g.list {
+			switch w := inst.enc.vex.W; w {
+			case "evexW0", "vexW0":
+				w0map[mergeKey(inst)] = inst
+			case "evexW1", "vexW1":
+				w1map[mergeKey(inst)] = inst
+			default:
+				log.Fatalf("%s: unexpected vex.W: %s", inst, w)
+			}
+		}
+
+		for k, w0 := range w0map {
+			w1 := w1map[k]
+			if w1 != nil {
+				w0.enc.vex.W = strings.Replace(w0.enc.vex.W, "W0", "WIG", 1)
+				delete(w1map, k)
+			}
+			list = append(list, w0)
+		}
+		for _, w1 := range w1map {
+			list = append(list, w1)
+		}
+
+		g.list = list
+	}
+}
+
+// assignZforms initializes the zform field of every instruction in ctx.
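+//
+// For example, an EVEX instruction with reg/mem, regV and reg
+// arguments and no opcode extension gets the zform
+// "evex reg/mem regV reg", which zcaseByZform maps to Zevex_rm_v_r.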
+func assignZforms(ctx *context) {
+	for _, g := range ctx.groups {
+		for _, inst := range g.list {
+			var parts []string
+			if inst.pset.Is("EVEX") {
+				parts = append(parts, "evex")
+			}
+			for _, arg := range inst.args {
+				parts = append(parts, arg.zkind)
+			}
+			if inst.enc.opdigit != "" {
+				parts = append(parts, "opdigit")
+			}
+			inst.zform = strings.Join(parts, " ")
+		}
+	}
+}
+
+// sortGroups sorts instruction groups by opcode and the instructions
+// inside each group by special rules (see below).
+//
+// The order of instructions inside a group determines the order
+// of ytab elements inside its ytabList.
+//
+// We want these rules to be satisfied:
+//	- EVEX-encoded entries go after VEX-encoded entries.
+//	  This way, VEX forms are selected over EVEX variants.
+//	- EVEX forms with SAE/RC must go before forms without them.
+//	  This helps to avoid problems with reg-reg instructions
+//	  that encode either of them in ModRM.R/M, which causes
+//	  ambiguity in the ytabList (more than one ytab can match
+//	  the args). If the first matching ytab has SAE/RC,
+//	  the problem does not occur.
+//	- Memory argument position affects the order.
+//	  This is required to stay in sync with the XED encoder when
+//	  there are multiple choices of how to encode an instruction.
+func sortGroups(ctx *context) {
+	sort.SliceStable(ctx.groups, func(i, j int) bool {
+		return ctx.groups[i].opcode < ctx.groups[j].opcode
 	})
 
-	tmpl := `// Code generated by %s. DO NOT EDIT.
-
-package x86
-
-var vexOptab = []Optab{
-%s
-}
-`
-	code := []byte(fmt.Sprintf(tmpl, progName, buf.String()))
-
-	prettyCode, err := format.Source(code)
-	if err != nil {
-		return nil, err
+	for _, g := range ctx.groups {
+		sortInstList(g.list)
 	}
-
-	_, err = w.Write(prettyCode)
-
-	return opcodes, err
 }
 
-func doGenerateAenum(goroot, output string, newNames []string) error {
-	w, err := os.Create(output + "/" + filenameAenum)
-	if err != nil {
-		return err
+func sortInstList(insts []*instruction) {
+	// Use strings for sorting to get reliable transitive "less".
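+	// For example, a VEX instruction with ytypes "Yxm Yxr Yxr" and
+	// its reg/mem argument at index 0 gets the key "azb Yxm Yxr Yxr".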
+	order := make(map[*instruction]string)
+	for _, inst := range insts {
+		encTag := 'a'
+		if inst.pset.Is("EVEX") {
+			encTag = 'b'
+		}
+		memTag := 'a'
+		if index := inst.ArgIndexByZkind("reg/mem"); index != -1 {
+			memTag = 'z' - rune(index)
+		}
+		rcsaeTag := 'a'
+		if !(inst.enc.evex.SAE || inst.enc.evex.Rounding) {
+			rcsaeTag = 'b'
+		}
+		order[inst] = fmt.Sprintf("%c%c%c %s",
+			encTag, memTag, rcsaeTag, inst.YtypeListString())
 	}
-	defer w.Close()
-	r, err := os.Open(goroot + "/" + pathAenum)
-	if err != nil {
-		return err
-	}
-	defer r.Close()
 
-	return generateAenum(r, w, newNames)
+	sort.SliceStable(insts, func(i, j int) bool {
+		return order[insts[i]] < order[insts[j]]
+	})
 }
 
-func doGenerateAnames(output string) error {
-	// Runs "go generate" over previously generated aenum file.
-	path := output + "/" + filenameAenum
-	cmd := exec.Command("go", "generate", path)
-	var buf bytes.Buffer
-	cmd.Stderr = &buf
-	err := cmd.Run()
-	if err != nil {
-		return errors.New(err.Error() + ": " + buf.String())
+// addGoSuffixes splits some groups into several groups by introducing a suffix.
+// For example, the ANDN group becomes ANDNL and ANDNQ (ANDN itself becomes empty).
+// Empty groups are removed.
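+//
+// Matcher lists are flat (pattern, suffix) pairs: with opLQ below,
+// an instruction whose pattern set matches "REXW=0" gets the "L"
+// suffix, while one that matches "REXW=1" gets "Q".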
+func addGoSuffixes(ctx *context) {
+	var opcodeSuffixMatchers map[string][]string
+	{
+		opXY := []string{"VL=0", "X", "VL=1", "Y"}
+		opXYZ := []string{"VL=0", "X", "VL=1", "Y", "VL=2", "Z"}
+		opQ := []string{"REXW=1", "Q"}
+		opLQ := []string{"REXW=0", "L", "REXW=1", "Q"}
+
+		opcodeSuffixMatchers = map[string][]string{
+			"VCVTPD2DQ":   opXY,
+			"VCVTPD2PS":   opXY,
+			"VCVTTPD2DQ":  opXY,
+			"VCVTQQ2PS":   opXY,
+			"VCVTUQQ2PS":  opXY,
+			"VCVTPD2UDQ":  opXY,
+			"VCVTTPD2UDQ": opXY,
+
+			"VFPCLASSPD": opXYZ,
+			"VFPCLASSPS": opXYZ,
+
+			"VCVTSD2SI":  opQ,
+			"VCVTTSD2SI": opQ,
+			"VCVTTSS2SI": opQ,
+			"VCVTSS2SI":  opQ,
+
+			"VCVTSD2USI":  opLQ,
+			"VCVTSS2USI":  opLQ,
+			"VCVTTSD2USI": opLQ,
+			"VCVTTSS2USI": opLQ,
+			"VCVTUSI2SD":  opLQ,
+			"VCVTUSI2SS":  opLQ,
+			"VCVTSI2SD":   opLQ,
+			"VCVTSI2SS":   opLQ,
+			"ANDN":        opLQ,
+			"BEXTR":       opLQ,
+			"BLSI":        opLQ,
+			"BLSMSK":      opLQ,
+			"BLSR":        opLQ,
+			"BZHI":        opLQ,
+			"MULX":        opLQ,
+			"PDEP":        opLQ,
+			"PEXT":        opLQ,
+			"RORX":        opLQ,
+			"SARX":        opLQ,
+			"SHLX":        opLQ,
+			"SHRX":        opLQ,
+		}
 	}
-	return nil
-}
 
-// testLineReplacer is used in uncommentedTestLine function.
-var testLineReplacer = strings.NewReplacer(
-	"//TODO: ", "",
+	newGroups := make(map[string][]*instruction)
+	for _, g := range ctx.groups {
+		kv := opcodeSuffixMatchers[g.opcode]
+		if kv == nil {
+			continue
+		}
 
-	// Fix register references.
-	"XMM", "X",
-	"YMM", "Y",
-)
-
-func uncommentedTestLine(line string) string {
-	// Sync with x86/x86test/print.go.
-	const x86testFmt = "\t%-39s // %s"
-
-	line = testLineReplacer.Replace(line)
-	i := strings.Index(line, " // ")
-	return fmt.Sprintf(x86testFmt, line[len("\t"):i], line[i+len(" // "):])
-}
-
-// stringsSet returns a map mapping each x in xs to true.
-func stringsSet(xs []string) map[string]bool {
-	set := make(map[string]bool, len(xs))
-	for _, x := range xs {
-		set[x] = true
-	}
-	return set
-}
-
-func doGenerateTests(goroot, output string, newNames []string) error {
-	testsFile, err := os.Open(goroot + "/" + pathTests)
-	if err != nil {
-		return err
-	}
-	defer testsFile.Close()
-
-	var rxCommentedTestCase = regexp.MustCompile(`//TODO: ([A-Z][A-Z0-9]+)`)
-
-	newNamesSet := stringsSet(newNames)
-
-	var buf bytes.Buffer
-	scanner := bufio.NewScanner(testsFile)
-	for scanner.Scan() {
-		line := scanner.Text()
-		m := rxCommentedTestCase.FindStringSubmatch(line)
-		if m != nil {
-			name := string(m[1])
-			if newNamesSet[name] {
-				line = uncommentedTestLine(line)
+		list := g.list[:0]
+		for _, inst := range g.list {
+			newOp := inst.opcode + inst.pset.Match(kv...)
+			if newOp != inst.opcode {
+				inst.opcode = newOp
+				newGroups[newOp] = append(newGroups[newOp], inst)
+			} else {
+				list = append(list, inst)
 			}
 		}
-		buf.WriteString(line)
-		buf.WriteByte('\n')
+		g.list = list
 	}
-
-	return ioutil.WriteFile(output+"/"+filenameTests, buf.Bytes(), 0644)
-}
-
-func doAutopatch(goroot, output string) error {
-	from := [...]string{
-		output + "/" + filenameVexOptabs,
-		output + "/" + filenameAenum,
-		output + "/" + filenameAnames,
-		output + "/" + filenameTests,
-	}
-	to := [...]string{
-		goroot + "/" + pathVexOptabs,
-		goroot + "/" + pathAenum,
-		goroot + "/" + pathAnames,
-		goroot + "/" + pathTests,
-	}
-
-	// No recovery if rename will fail.
-	// There is a warning in "autopatch" description.
-	for i := range from {
-		if err := os.Rename(from[i], to[i]); err != nil {
-			return err
+	// Some groups may have become empty due to the opcode split;
+	// keep only the non-empty ones.
+	groups := ctx.groups[:0]
+	for _, g := range ctx.groups {
+		if len(g.list) != 0 {
+			groups = append(groups, g)
 		}
 	}
-
-	return nil
+	for op, insts := range newGroups {
+		groups = append(groups, &instGroup{
+			opcode: op,
+			list:   insts,
+		})
+	}
+	ctx.groups = groups
 }
 
-func specRowReader(path string) (*x86csv.Reader, error) {
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	return x86csv.NewReader(bufio.NewReader(f)), nil
+func printTables(ctx *context) {
+	writeTables(os.Stdout, ctx)
 }
diff --git a/x86/x86avxgen/print.go b/x86/x86avxgen/print.go
new file mode 100644
index 0000000..c17dbaf
--- /dev/null
+++ b/x86/x86avxgen/print.go
@@ -0,0 +1,116 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"bytes"
+	"go/format"
+	"io"
+	"log"
+	"sort"
+	"text/template"
+)
+
+var tablesTemplate = template.Must(template.New("avx_optabs").Parse(`
+// Code generated by x86avxgen. DO NOT EDIT.
+
+package x86
+
+// VEX instructions that come in two forms:
+//	VTHING xmm2/m128, xmmV, xmm1
+//	VTHING ymm2/m256, ymmV, ymm1
+//
+// The opcode array in the corresponding Optab entry
+// should contain the (VEX prefixes, opcode byte) pair
+// for each of the two forms.
+// For example, the entries for VPXOR are:
+//
+//	VPXOR xmm2/m128, xmmV, xmm1
+//	VEX.NDS.128.66.0F.WIG EF /r
+//
+//	VPXOR ymm2/m256, ymmV, ymm1
+//	VEX.NDS.256.66.0F.WIG EF /r
+//
+// They produce this optab entry:
+//
+//	{AVPXOR, yvex_xy3, Pavx, opBytes{vex128|vex66|vex0F|vexWIG, 0xEF, vex256|vex66|vex0F|vexWIG, 0xEF}}
+//
+// VEX requires at least 2 bytes inside opBytes:
+//	- VEX prefixes (vex-prefixed constants)
+//	- Opcode byte
+//
+// EVEX instructions extend VEX form variety:
+//	VTHING zmm2/m512, zmmV, zmm1    -- implicit K0 (merging)
+//	VTHING zmm2/m512, zmmV, K, zmm1 -- explicit K mask (can't use K0)
+//
+// EVEX requires at least 3 bytes inside opBytes:
+//	- EVEX prefixes (evex-prefixed constants); similar to VEX
+//	- Displacement multiplier info (scale / broadcast scale)
+//	- Opcode byte; similar to VEX
+//
+// Both VEX and EVEX instructions may have opdigit (opcode extension) byte
+// which follows the primary opcode byte.
+// Because it can only have value of 0-7, it is written in octal notation.
+//
+// x86.csv can be very useful for figuring out proper [E]VEX parts.
+
+{{ range .Ylists }}
+  var {{.Name}} = []ytab{
+    {{- range .Ytabs }}
+      {zcase: {{.Zcase}}, zoffset: {{.Zoffset}}, args: argList{ {{.ArgList}} }},
+    {{- end }}
+  }
+{{ end }}
+
+var avxOptab = [...]Optab{
+  {{- range .Optabs }}
+    {as: {{.Opcode}}, ytab: {{.YtabList.Name}}, prefix: Pavx, op: opBytes{
+      {{- range .OpLines }}
+        {{.}},
+      {{- end }}
+    }},
+  {{- end }}
+}
+`))
+
+// writeTables writes the AVX optabs file contents to w.
+func writeTables(w io.Writer, ctx *context) {
+	ylists := make([]*ytabList, 0, len(ctx.ytabLists))
+	for _, ylist := range ctx.ytabLists {
+		ylists = append(ylists, ylist)
+	}
+	sort.Slice(ylists, func(i, j int) bool {
+		return ylists[i].Name < ylists[j].Name
+	})
+	optabs := make([]*optab, 0, len(ctx.optabs))
+	for _, o := range ctx.optabs {
+		optabs = append(optabs, o)
+	}
+	sort.Slice(optabs, func(i, j int) bool {
+		return optabs[i].Opcode < optabs[j].Opcode
+	})
+
+	var buf bytes.Buffer
+	err := tablesTemplate.Execute(&buf, struct {
+		Ylists []*ytabList
+		Optabs []*optab
+	}{
+		Ylists: ylists,
+		Optabs: optabs,
+	})
+	if err != nil {
+		log.Fatalf("template execute error: %v", err)
+	}
+
+	// TODO: invoke "go fmt" or format.Gofmt? #22695.
+	prettyCode, err := format.Source(buf.Bytes())
+	if err != nil {
+		log.Fatalf("gofmt error: %v", err)
+	}
+
+	if _, err := w.Write(prettyCode); err != nil {
+		log.Fatalf("write output: %v", err)
+	}
+}
diff --git a/x86/x86avxgen/tables.go b/x86/x86avxgen/tables.go
deleted file mode 100644
index 5d80daa..0000000
--- a/x86/x86avxgen/tables.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-// ytabMap maps keys generated with keyFromInsts to ytab identifiers.
-var ytabMap = map[string]ytabID{
-	"": "yvex",
-
-	// 1 form:
-	"m":           "yvex_m",
-	"y/m,x":       "yvex_y2",
-	"x/m,r":       "yvex_vcvtsd2si",
-	"m,y":         "yvex_vbroadcastf",
-	"m,x;m,y":     "yvex_mxy",
-	"x,x":         "yvex_xx2",
-	"x/m,x":       "yvex_x2",
-	"x/m,xV,x":    "yvex_x3",
-	"x,xV,x":      "yvex_xx3",
-	"y/m,yV,y":    "yvex_yy3",
-	"r/m,rV":      "yvex_r2",
-	"r/m,rV,r":    "yvex_r3",
-	"r/m,xV,x":    "yvex_rx3",
-	"rV,r/m,r":    "yvex_vmr3",
-	"i8,r/m,r":    "yvex_ri3",
-	"i8,x/m,x":    "yvex_xi3",
-	"i8,x,r/m":    "yvex_vpextr",
-	"i8,y,x/m":    "yvex_yi3",
-	"i8,y/m,y":    "yvex_vpermpd",
-	"i8,r/m,xV,x": "yvex_rxi4",
-	"i8,x/m,xV,x": "yvex_xxi4",
-	"i8,x/m,yV,y": "yvex_xyi4",
-	"i8,y/m,yV,y": "yvex_yyi4",
-
-	// 2 forms:
-	"m,y;x,y":                   "yvex_vpbroadcast_sd",
-	"i8,x,r;i8,x,r/m":           "yvex_vpextrw",
-	"i8,y,x/m;i8,x,x/m":         "yvex_vcvtps2ph",
-	"i8,x/m,x;i8,y/m,y":         "yvex_xyi3",
-	"i8,x/m,xV,x;i8,y/m,yV,y":   "yvex_vpalignr",
-	"i8,x,xV;i8,y,yV":           "yvex_shift_dq",
-	"x/m,xV,x;y/m,yV,y":         "yvex_xy3",
-	"x/m,xV,x;i8,x,xV":          "yvex_shift",
-	"x/m,x;x/m,y":               "yvex_vpbroadcast",
-	"x/m,x;y/m,y":               "yvex_xy2",
-	"x,m;y,m":                   "yvex_vmovntdq",
-	"x,r/m;r/m,x":               "yvex_vmovd",
-	"x,r;y,r":                   "yvex_xyr2",
-	"x,m;m,xV,x":                "yvex_vmovhpd",
-	"xIH,x/m,xV,x;yIH,y/m,yV,y": "yvex_xy4",
-
-	// 4 forms:
-	"m,x;x,x;m,y;x,y":                     "yvex_vpbroadcast_ss",
-	"x,m;m,x;x,xV,x;x,xV,x":               "yvex_vmov",
-	"x,xV,m;y,yV,m;m,xV,x;m,yV,y":         "yvex_vblendvpd",
-	"x/m,x;x,x/m;y/m,y;y,y/m":             "yvex_vmovdqa",
-	"x/m,xV,x;i8,x,xV;x/m,yV,y;i8,y,yV":   "yvex_vps",
-	"i8,x/m,x;x/m,xV,x;i8,y/m,y;y/m,yV,y": "yvex_vpermilp",
-
-	// 5 forms:
-	"x,r/m;m,x;r/m,x;x,x;x,x/m": "yvex_vmovq",
-}
-
-// precomputedOptabs is used to emit some optabs that can not be
-// generated with normal execution path.
-var precomputedOptabs = map[string]optab{
-	// This is added to avoid backwards-incompatible change.
-	//
-	// initially, yvex_xyi3 was added with Yi8 args.
-	// Later, it was decided to make it Yu8, but Yi8 forms
-	// were preserved as well.
-	// So, 4 ytabs instead of 2.
-	"VPSHUFD": {
-		"VPSHUFD",
-		"yvex_xyi3",
-		[]string{
-			"vexNOVSR | vex128 | vex66 | vex0F | vexWIG", "0x70",
-			"vexNOVSR | vex256 | vex66 | vex0F | vexWIG", "0x70",
-			"vexNOVSR | vex128 | vex66 | vex0F | vexWIG", "0x70",
-			"vexNOVSR | vex256 | vex66 | vex0F | vexWIG", "0x70",
-		},
-	},
-
-	// Instructions that can not be constructed from
-	// "x86.csv" because it only have 2/4 forms.
-	"VPSRLQ": {
-		"VPSRLQ",
-		"yvex_shift",
-		[]string{
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x73", "0xD0",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x73", "0xD0",
-			"vexNDS | vex128 | vex66 | vex0F | vexWIG", "0xD3",
-			"vexNDS | vex256 | vex66 | vex0F | vexWIG", "0xD3",
-		},
-	},
-	"VPSLLQ": {
-		"VPSLLQ",
-		"yvex_shift",
-		[]string{
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x73", "0xF0",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x73", "0xF0",
-			"vexNDS | vex128 | vex66 | vex0F | vexWIG", "0xF3",
-			"vexNDS | vex256 | vex66 | vex0F | vexWIG", "0xF3",
-		},
-	},
-	"VPSLLD": {
-		"VPSLLD",
-		"yvex_shift",
-		[]string{
-			"vexNDS | vex128 | vex66 | vex0F | vexWIG", "0x72", "0xF0",
-			"vexNDS | vex256 | vex66 | vex0F | vexWIG", "0x72", "0xF0",
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0xF2",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0xF2",
-		},
-	},
-	"VPSRLD": {
-		"VPSRLD",
-		"yvex_shift",
-		[]string{
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x72", "0xD0",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x72", "0xD0",
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0xD2",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0xD2",
-		},
-	},
-
-	// Thease are here due to adhoc encoded
-	// ModR/M opcode extension.
-	"VPSLLDQ": {
-		"VPSLLDQ",
-		"yvex_shift_dq",
-		[]string{
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x73", "0xF8",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x73", "0xF8",
-		},
-	},
-	"VPSRLDQ": {
-		"VPSRLDQ",
-		"yvex_shift_dq",
-		[]string{
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x73", "0xD8",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x73", "0xD8",
-		},
-	},
-	"VPSLLW": {
-		"VPSLLW",
-		"yvex_vps",
-		[]string{
-			"vexNDS | vex128 | vex66 | vex0F | vexWIG", "0xF1",
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x71", "0xF0",
-			"vexNDS | vex256 | vex66 | vex0F | vexWIG", "0xF1",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x71", "0xF0",
-		},
-	},
-	"VPSRAD": {
-		"VPSRAD",
-		"yvex_vps",
-		[]string{
-			"vexNDS | vex128 | vex66 | vex0F | vexWIG", "0xE2",
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x72", "0xE0",
-			"vexNDS | vex256 | vex66 | vex0F | vexWIG", "0xE2",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x72", "0xE0",
-		},
-	},
-	"VPSRAW": {
-		"VPSRAW",
-		"yvex_vps",
-		[]string{
-			"vexNDS | vex128 | vex66 | vex0F | vexWIG", "0xE1",
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x71", "0xE0",
-			"vexNDS | vex256 | vex66 | vex0F | vexWIG", "0xE1",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x71", "0xE0",
-		},
-	},
-	"VPSRLW": {
-		"VPSRLW",
-		"yvex_vps",
-		[]string{
-			"vexNDS | vex128 | vex66 | vex0F | vexWIG", "0xD1",
-			"vexNDD | vex128 | vex66 | vex0F | vexWIG", "0x71", "0xD0",
-			"vexNDS | vex256 | vex66 | vex0F | vexWIG", "0xD1",
-			"vexNDD | vex256 | vex66 | vex0F | vexWIG", "0x71", "0xD0",
-		},
-	},
-}
diff --git a/x86/x86avxgen/testdata/golden.txt b/x86/x86avxgen/testdata/golden.txt
new file mode 100644
index 0000000..c5bb549
--- /dev/null
+++ b/x86/x86avxgen/testdata/golden.txt
@@ -0,0 +1,160 @@
+var _yvmovsd = []ytab{
+	{zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxr, Yxr}},
+	{zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Ym}},
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr}},
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yxr, Yxr}},
+	{zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxrEvex, YxrEvex}},
+	{zcase: Zevex_r_v_k_rm, zoffset: 3, args: argList{YxrEvex, YxrEvex, Yknot0, YxrEvex}},
+	{zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, Ym}},
+	{zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, Ym}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YxrEvex}},
+	{zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, YxrEvex}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxrEvex, YxrEvex, YxrEvex}},
+	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxrEvex, YxrEvex, Yknot0, YxrEvex}},
+}
+----
+	{as: AVMOVSD, ytab: _yvmovsd, prefix: Pavx, op: opBytes{
+		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x11,
+		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x11,
+		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x10,
+		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x10,
+		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexZeroingEnabled, 0x11,
+		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8, 0x11,
+		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexZeroingEnabled, 0x10,
+		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexZeroingEnabled, 0x10,
+	}}
+
+======
+var _yvaddpd = []ytab{
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}},
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}},
+	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}},
+	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}},
+	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}},
+}
+----
+	{as: AVADDPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
+		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x58,
+		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x58,
+		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x58,
+		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x58,
+		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x58,
+	}}
+
+======
+var _yvcmppd = []ytab{
+	{zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}},
+	{zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}},
+	{zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yk}},
+	{zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yk}},
+	{zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, Yk}},
+	{zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, Yk}},
+	{zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, Yk}},
+	{zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, Yk}},
+}
+----
+	{as: AVCMPPD, ytab: _yvcmppd, prefix: Pavx, op: opBytes{
+		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC2,
+		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xC2,
+		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled, 0xC2,
+		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8, 0xC2,
+		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8, 0xC2,
+	}}
+
+======
+var _yvmovapd = []ytab{
+	{zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxm}},
+	{zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Yym}},
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}},
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}},
+	{zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}},
+	{zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}},
+	{zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YymEvex}},
+	{zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YymEvex}},
+	{zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, Yzm}},
+	{zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzm}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}},
+	{zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}},
+	{zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}},
+	{zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}},
+}
+----
+	{as: AVMOVAPD, ytab: _yvmovapd, prefix: Pavx, op: opBytes{
+		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x29,
+		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x29,
+		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x28,
+		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x28,
+		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x29,
+		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x29,
+		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x29,
+		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x28,
+		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x28,
+		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x28,
+	}}
+
+======
+var _yvpslld = []ytab{
+	{zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yu8, Yxr, Yxr}},
+	{zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yu8, Yyr, Yyr}},
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}},
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr, Yyr}},
+	{zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}},
+	{zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}},
+	{zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}},
+	{zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}},
+	{zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yzm, Yzr}},
+	{zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, Yzm, Yknot0, Yzr}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}},
+	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex, YyrEvex}},
+	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YyrEvex, Yknot0, YyrEvex}},
+	{zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr, Yzr}},
+	{zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, Yzr, Yknot0, Yzr}},
+}
+----
+	{as: AVPSLLW, ytab: _yvpslld, prefix: Pavx, op: opBytes{
+		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x71, 06,
+		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x71, 06,
+		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF1,
+		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF1,
+		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x71, 06,
+		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x71, 06,
+		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x71, 06,
+		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1,
+		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1,
+		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1,
+	}}
+
+======
+var _yvzeroall = []ytab{
+	{zcase: Zvex, zoffset: 2, args: argList{}},
+}
+----
+	{as: AVZEROALL, ytab: _yvzeroall, prefix: Pavx, op: opBytes{
+		avxEscape | vex256 | vex0F | vexW0, 0x77,
+	}}
+
+======
+var _yvzeroall = []ytab{
+	{zcase: Zvex, zoffset: 2, args: argList{}},
+}
+----
+	{as: AVZEROUPPER, ytab: _yvzeroall, prefix: Pavx, op: opBytes{
+		avxEscape | vex128 | vex0F | vexW0, 0x77,
+	}}
+
+======
+var _yvcomisd = []ytab{
+	{zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}},
+	{zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, YxrEvex}},
+}
+----
+	{as: AVUCOMISD, ytab: _yvcomisd, prefix: Pavx, op: opBytes{
+		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x2E,
+		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x2E,
+	}}
diff --git a/x86/x86avxgen/testdata/xedpath/all-dec-instructions.txt b/x86/x86avxgen/testdata/xedpath/all-dec-instructions.txt
new file mode 100644
index 0000000..07cbc41
--- /dev/null
+++ b/x86/x86avxgen/testdata/xedpath/all-dec-instructions.txt
@@ -0,0 +1,57893 @@
+AVX_INSTRUCTIONS()::
+
+{
+ICLASS: VFMADDSUBPS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x5C V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x5C V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x5C V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN: VV1 0x5C V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:dq:f32
+
+PATTERN: VV1 0x5C V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x5C V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x5C V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN: VV1 0x5C V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 REG3=YMM_B():r:qq:f32
+}
+
+{
+ICLASS: VFMADDSUBPD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x5D V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x5D V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x5D V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN: VV1 0x5D V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:dq:f64
+
+PATTERN: VV1 0x5D V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x5D V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x5D V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN: VV1 0x5D V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 REG3=YMM_B():r:qq:f64
+}
+
+{
+ICLASS: VFMSUBADDPS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x5E V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x5E V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x5E V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN: VV1 0x5E V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:dq:f32
+
+PATTERN: VV1 0x5E V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x5E V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x5E V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN: VV1 0x5E V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 REG3=YMM_B():r:qq:f32
+}
+
+{
+ICLASS: VFMSUBADDPD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x5F V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x5F V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x5F V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN: VV1 0x5F V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:dq:f64
+
+PATTERN: VV1 0x5F V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x5F V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x5F V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN: VV1 0x5F V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 REG3=YMM_B():r:qq:f64
+}
+
+{
+ICLASS: VFMADDPS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x68 V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x68 V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x68 V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN: VV1 0x68 V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:dq:f32
+
+PATTERN: VV1 0x68 V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x68 V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x68 V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN: VV1 0x68 V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 REG3=YMM_B():r:qq:f32
+}
+
+{
+ICLASS: VFMADDPD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x69 V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x69 V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x69 V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN: VV1 0x69 V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:dq:f64
+
+PATTERN: VV1 0x69 V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x69 V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x69 V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN: VV1 0x69 V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 REG3=YMM_B():r:qq:f64
+}
+
+{
+ICLASS: VFMADDSS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x6A V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x6A V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x6A V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:d:f32
+
+PATTERN: VV1 0x6A V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:d:f32
+}
+
+{
+ICLASS: VFMADDSD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x6B V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x6B V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x6B V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:q:f64
+
+PATTERN: VV1 0x6B V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:q:f64
+}
+
+{
+ICLASS: VFMSUBPS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x6C V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x6C V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x6C V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN: VV1 0x6C V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:dq:f32
+
+PATTERN: VV1 0x6C V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x6C V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x6C V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN: VV1 0x6C V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 REG3=YMM_B():r:qq:f32
+}
+
+{
+ICLASS: VFMSUBPD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x6D V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x6D V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x6D V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN: VV1 0x6D V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:dq:f64
+
+PATTERN: VV1 0x6D V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x6D V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x6D V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN: VV1 0x6D V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 REG3=YMM_B():r:qq:f64
+}
+
+{
+ICLASS: VFMSUBSS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x6E V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x6E V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x6E V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:d:f32
+
+PATTERN: VV1 0x6E V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:d:f32
+}
+
+{
+ICLASS: VFMSUBSD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x6F V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x6F V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x6F V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:q:f64
+
+PATTERN: VV1 0x6F V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:q:f64
+}
+
+{
+ICLASS: VFNMADDPS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x78 V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x78 V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x78 V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN: VV1 0x78 V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:dq:f32
+
+PATTERN: VV1 0x78 V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x78 V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x78 V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN: VV1 0x78 V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 REG3=YMM_B():r:qq:f32
+}
+
+{
+ICLASS: VFNMADDPD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x79 V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x79 V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x79 V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN: VV1 0x79 V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:dq:f64
+
+PATTERN: VV1 0x79 V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x79 V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x79 V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN: VV1 0x79 V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 REG3=YMM_B():r:qq:f64
+}
+
+{
+ICLASS: VFNMADDSS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x7A V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x7A V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x7A V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:d:f32
+
+PATTERN: VV1 0x7A V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:d:f32
+}
+
+{
+ICLASS: VFNMADDSD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x7B V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x7B V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x7B V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:q:f64
+
+PATTERN: VV1 0x7B V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:q:f64
+}
+
+{
+ICLASS: VFNMSUBPS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x7C V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x7C V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x7C V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN: VV1 0x7C V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:dq:f32
+
+PATTERN: VV1 0x7C V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x7C V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:f32
+
+PATTERN: VV1 0x7C V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN: VV1 0x7C V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 REG3=YMM_B():r:qq:f32
+}
+
+{
+ICLASS: VFNMSUBPD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: MXCSR AMDONLY
+
+PATTERN: VV1 0x7D V66 W0 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x7D V66 W0 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x7D V66 W1 VL128  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN: VV1 0x7D V66 W1 VL128  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:dq:f64
+
+PATTERN: VV1 0x7D V66 W0 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x7D V66 W0 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:f64
+
+PATTERN: VV1 0x7D V66 W1 VL256  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN: VV1 0x7D V66 W1 VL256  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 REG3=YMM_B():r:qq:f64
+}
+
+{
+ICLASS: VFNMSUBSS
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x7E V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32 REG2=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x7E V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32 REG3=XMM_SE():r:dq:f32
+
+PATTERN: VV1 0x7E V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:d:f32
+
+PATTERN: VV1 0x7E V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:d:f32
+}
+
+{
+ICLASS: VFNMSUBSD
+CPL: 3
+CATEGORY: FMA4
+ISA_SET: FMA4
+EXTENSION: FMA4
+ATTRIBUTES: SIMD_SCALAR MXCSR AMDONLY
+
+PATTERN: VV1 0x7F V66 W0  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64 REG2=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x7F V66 W0  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64 REG3=XMM_SE():r:dq:f64
+
+PATTERN: VV1 0x7F V66 W1  V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:q:f64
+
+PATTERN: VV1 0x7F V66 W1  V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS: REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:q:f64
+}
+
+
+###FILE: ./datafiles/amdxop/amd-vpermil2-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+AVX_INSTRUCTIONS()::
+
+
+{
+ICLASS    : VPERMIL2PS
+CPL       : 3
+CATEGORY  : XOP
+EXTENSION : XOP
+ISA_SET   : XOP
+ATTRIBUTES : AMDONLY
+
+# 128b W0
+PATTERN : VV1 0x48 VL128 V66 V0F3A W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x48 VL128 V66 V0F3A W0  MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:f32 IMM0:r:b
+
+# 256b W0
+PATTERN : VV1 0x48 VL256 V66 V0F3A W0   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0x48 VL256 V66 V0F3A W0   MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:f32 IMM0:r:b
+
+# 128b W1
+PATTERN : VV1 0x48 VL128 V66 V0F3A W1  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 MEM0:r:dq:f32  IMM0:r:b
+
+PATTERN : VV1 0x48 VL128 V66 V0F3A W1  MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_SE():r:dq:f32 REG3=XMM_B():r:dq:f32  IMM0:r:b
+
+# 256b W1
+PATTERN : VV1 0x48 VL256 V66 V0F3A W1   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32  REG2=YMM_SE():r:qq:f32  MEM0:r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0x48 VL256 V66 V0F3A W1   MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_SE():r:qq:f32 REG3=YMM_B():r:qq:f32  IMM0:r:b
+
+}
+
+
+
+{
+ICLASS    : VPERMIL2PD
+CPL       : 3
+CATEGORY  : XOP
+EXTENSION : XOP
+ISA_SET   : XOP
+ATTRIBUTES : AMDONLY
+
+# 128b W0
+PATTERN : VV1 0x49 VL128 V66 V0F3A W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x49 VL128 V66 V0F3A W0  MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:f64 IMM0:r:b
+
+# 256b W0
+PATTERN : VV1 0x49 VL256 V66 V0F3A W0   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0x49 VL256 V66 V0F3A W0   MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:f64 IMM0:r:b
+
+# 128b W1
+PATTERN : VV1 0x49 VL128 V66 V0F3A W1  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 MEM0:r:dq:f64  IMM0:r:b
+
+PATTERN : VV1 0x49 VL128 V66 V0F3A W1  MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_SE():r:dq:f64 REG3=XMM_B():r:dq:f64  IMM0:r:b
+
+# 256b W1
+PATTERN : VV1 0x49 VL256 V66 V0F3A W1   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64  REG2=YMM_SE():r:qq:f64  MEM0:r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0x49 VL256 V66 V0F3A W1   MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_SE():r:qq:f64 REG3=YMM_B():r:qq:f64  IMM0:r:b
+
+}
+
+
+
+###FILE: ./datafiles/xsaveopt/xsaveopt-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : XSAVEOPT
+CPL       : 3
+CATEGORY  : XSAVEOPT
+EXTENSION : XSAVEOPT
+ATTRIBUTES : xmm_state_r REQUIRES_ALIGNMENT  x87_mmx_state_r NOTSX
+PATTERN   : 0x0F 0xAE MOD[mm] MOD!=3 REG[0b110] RM[nnn]  no_refining_prefix norexw_prefix MODRM()
+#FIXME 2007-06-25 need a meaningful width code for XSAVE/XSAVEOPT/XRSTOR
+OPERANDS  : MEM0:w:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+{
+ICLASS    : XSAVEOPT64
+CPL       : 3
+CATEGORY  : XSAVEOPT
+EXTENSION : XSAVEOPT
+ATTRIBUTES : xmm_state_r REQUIRES_ALIGNMENT  x87_mmx_state_r NOTSX
+
+PATTERN   : 0x0F 0xAE MOD[mm] MOD!=3 REG[0b110] RM[nnn] no_refining_prefix rexw_prefix MODRM()
+#FIXME 2007-06-25 need a meaningful width code for XSAVE/XSAVEOPT/XRSTOR
+OPERANDS  : MEM0:w:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+
+###FILE: ./datafiles/mpx/mpx-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+
+INSTRUCTIONS()::
+
+
+UDELETE: NOP0F1A
+UDELETE: NOP0F1B
+
+
+
+{
+ICLASS: BNDMK
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES: NO_RIP_REL
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  f3_refining_prefix
+OPERANDS: REG0=BND_R():w  AGEN:r
+}
+
+
+
+
+{
+ICLASS: BNDCL
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES: EXCEPTION_BR
+COMMENT: 67 prefixes will be misinterpreted on MPX NI. XED cannot ignore them.
+PATTERN:  0x0F 0x1A MPXMODE=1  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  f3_refining_prefix
+OPERANDS: REG0=BND_R():r AGEN:r
+
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD=3 REG[rrr] RM[nnn]   f3_refining_prefix  mode64
+OPERANDS: REG0=BND_R():r REG1=GPR64_B():r
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD=3 REG[rrr] RM[nnn]   f3_refining_prefix  not64
+OPERANDS: REG0=BND_R():r REG1=GPR32_B():r
+}
+
+{
+ICLASS: BNDCU
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES: EXCEPTION_BR
+COMMENT: 67 prefixes will be misinterpreted on MPX NI. XED cannot ignore them.
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  f2_refining_prefix
+OPERANDS: REG0=BND_R():r AGEN:r
+
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD=3 REG[rrr] RM[nnn]   f2_refining_prefix  mode64
+OPERANDS: REG0=BND_R():r REG1=GPR64_B():r
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD=3 REG[rrr] RM[nnn]   f2_refining_prefix  not64
+OPERANDS: REG0=BND_R():r REG1=GPR32_B():r
+}
+
+{
+ICLASS: BNDCN
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES:  EXCEPTION_BR
+COMMENT: 67 prefixes will be misinterpreted on MPX NI. XED cannot ignore them.
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() f2_refining_prefix
+OPERANDS: REG0=BND_R():r AGEN:r
+
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD=3 REG[rrr] RM[nnn]  f2_refining_prefix  mode64
+OPERANDS: REG0=BND_R():r REG1=GPR64_B():r
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD=3 REG[rrr] RM[nnn]  f2_refining_prefix  not64
+OPERANDS: REG0=BND_R():r REG1=GPR32_B():r
+
+}
+
+{
+ICLASS: BNDMOV
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES:
+COMMENT: load form
+
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]  osz_refining_prefix REFINING66()
+OPERANDS: REG0=BND_R():w REG1=BND_B():r
+
+# 16b refs 64b memop (2x32b) but only if EASZ=32b!
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  osz_refining_prefix REFINING66() mode16 eamode32
+OPERANDS: REG0=BND_R():w MEM0:r:q:u32
+
+# 32b refs 64b memop (2x32b)
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  osz_refining_prefix REFINING66() mode32 eamode32
+OPERANDS: REG0=BND_R():w MEM0:r:q:u32
+
+# 64b refs 128b memop (2x64b)
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  osz_refining_prefix REFINING66() mode64
+OPERANDS: REG0=BND_R():w MEM0:r:dq:u64
+
+
+
+}
+
+{
+ICLASS: BNDMOV
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES:
+COMMENT: store form
+
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[0b11] MOD=3 REG[rrr] RM[nnn] osz_refining_prefix REFINING66()
+OPERANDS: REG0=BND_B():w   REG1=BND_R():r
+
+# 16b refs 64b memop (2x32b) but only if EASZ=32b!
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  osz_refining_prefix REFINING66() mode16 eamode32
+OPERANDS: MEM0:w:q:u32 REG0=BND_R():r
+
+# 32b refs 64b memop (2x32b)
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  osz_refining_prefix REFINING66() mode32
+OPERANDS: MEM0:w:q:u32 REG0=BND_R():r
+
+# 64b refs 128b memop (2x64b)
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  osz_refining_prefix REFINING66() mode64
+OPERANDS: MEM0:w:dq:u64 REG0=BND_R():r
+}
+
+
+{
+ICLASS: BNDLDX
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES:  EXCEPTION_BR SPECIAL_AGEN_REQUIRED INDEX_REG_IS_POINTER NO_RIP_REL
+COMMENT:  RIP (mode64, easz64, MOD=0, RM=5) mode disallowed in 64b, and 16/32b is easize32 only
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix not64 eamode32
+OPERANDS: REG0=BND_R():w MEM0:r:bnd32
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD=0 REG[rrr] RM[nnn]   MODRM()  no_refining_prefix mode64  # RM!=5
+OPERANDS: REG0=BND_R():w MEM0:r:bnd64
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD=1 REG[rrr] RM[nnn]   MODRM()  no_refining_prefix mode64
+OPERANDS: REG0=BND_R():w MEM0:r:bnd64
+PATTERN:  0x0F 0x1A MPXMODE=1 MOD[mm] MOD=2 REG[rrr] RM[nnn]   MODRM()  no_refining_prefix mode64
+OPERANDS: REG0=BND_R():w MEM0:r:bnd64
+}
+
+{
+ICLASS: BNDSTX
+EXTENSION: MPX
+CATEGORY:  MPX
+ISA_SET:   MPX
+ATTRIBUTES:  EXCEPTION_BR SPECIAL_AGEN_REQUIRED INDEX_REG_IS_POINTER NO_RIP_REL
+COMMENT:  RIP (mode64, easz64, MOD=0, RM=5) mode disallowed in 64b, and 16/32b is easize32 only
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  no_refining_prefix not64 eamode32
+OPERANDS: MEM0:w:bnd32 REG0=BND_R():r
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD=0 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix mode64 # RM!=5
+OPERANDS: MEM0:w:bnd64 REG0=BND_R():r
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD=1 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix mode64
+OPERANDS: MEM0:w:bnd64 REG0=BND_R():r
+PATTERN:  0x0F 0x1B MPXMODE=1 MOD[mm] MOD=2 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix mode64
+OPERANDS: MEM0:w:bnd64 REG0=BND_R():r
+}
+
+{
+ICLASS    : NOP
+CPL       : 3
+CATEGORY  : WIDENOP
+ATTRIBUTES: NOP
+EXTENSION : BASE
+ISA_SET   : PPRO
+COMMENT   : MPXMODE=1: some of the reg/reg forms of these NOPs are still NOPs.
+
+PATTERN   : 0x0F 0x1A MPXMODE=1 MOD[0b11] MOD=3 REG[rrr] RM[nnn] no_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1A
+
+PATTERN   : 0x0F 0x1B MPXMODE=1 MOD[0b11] MOD=3 REG[rrr] RM[nnn] no_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1B
+
+PATTERN   : 0x0F 0x1B MPXMODE=1 MOD[0b11] MOD=3 REG[rrr] RM[nnn] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1B
+}
+
+
+{
+ICLASS    : NOP
+CPL       : 3
+CATEGORY  : WIDENOP
+ATTRIBUTES: NOP
+EXTENSION : BASE
+ISA_SET   : PPRO
+COMMENT   : For MPXMODE=0 operation
+
+PATTERN   : 0x0F 0x1A MPXMODE=0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1A
+
+PATTERN   : 0x0F 0x1B MPXMODE=0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1B
+
+PATTERN   : 0x0F 0x1A MPXMODE=0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPRv_B():r MEM0:r:v
+IFORM     : NOP_GPRv_MEMv_0F1A
+
+PATTERN   : 0x0F 0x1B MPXMODE=0 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()
+OPERANDS  : REG0=GPRv_B():r MEM0:r:v
+IFORM     : NOP_GPRv_MEM_0F1B
+}
+
+
+
+
+###FILE: ./datafiles/cet/cet-nop-remove.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2017 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+
+INSTRUCTIONS()::
+
+UDELETE: NOP0F1E
+
+{
+ICLASS    : NOP
+#UNAME     : NOP0F1E
+CPL       : 3
+CATEGORY  : WIDENOP
+EXTENSION : BASE
+ATTRIBUTES: NOP
+ISA_SET   : PPRO
+COMMENT   : reg form MODRM.MOD=3 & MODRM.REG=0b001 with f3 prefix is RDSSP{D,Q}
+
+# mem forms
+
+PATTERN   : 0x0F 0x1E MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:r:v REG0=GPRv_R():r
+IFORM     : NOP_MEMv_GPRv_0F1E
+
+
+# reg forms
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[rrr] RM[nnn] no_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[rrr] RM[nnn] f2_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[rrr] RM[nnn] osz_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+
+
+
+
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b000] RM[nnn] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+# ...
+# F3 with MODRM.REG=0b001 is for CET for all values of RM.
+# ...
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b010] RM[nnn] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b011] RM[nnn] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b100] RM[nnn] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b101] RM[nnn] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b110] RM[nnn] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b111] RM[0b000] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b111] RM[0b001] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+# ...
+# F3 with MODRM.REG=0b111  with RM=2 or RM=3 is for CET
+# ...
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b111] RM[0b100] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b111] RM[0b101] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b111] RM[0b110] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3 REG[0b111] RM[0b111] f3_refining_prefix
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+
+}
+
+
+# REPLACE CERTAIN NOPS WITH MODAL OPTIONS  based on CET=0/1
+{
+ICLASS    : NOP
+#UNAME     : NOP0F1E
+CPL       : 3
+CATEGORY  : WIDENOP
+EXTENSION : BASE
+ATTRIBUTES: NOP
+ISA_SET   : PPRO
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3  REG[0b111] RM[0b010]  f3_refining_prefix CET=0
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3  REG[0b111] RM[0b011]  f3_refining_prefix CET=0
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+}
+
+
+{
+ICLASS    : NOP
+#UNAME     : NOP0F1E
+CPL       : 3
+CATEGORY  : WIDENOP
+EXTENSION : BASE
+ATTRIBUTES: NOP
+ISA_SET   : PPRO
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3  REG[0b001] RM[nnn]  f3_refining_prefix W0 CET=0
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+
+PATTERN   : 0x0F 0x1E MOD[0b11] MOD=3  REG[0b001] RM[nnn]  f3_refining_prefix W1 mode64  CET=0
+OPERANDS  : REG0=GPRv_B():r REG1=GPRv_R():r
+IFORM     : NOP_GPRv_GPRv_0F1E
+}
+
+
+###FILE: ./datafiles/cet/cet-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2017 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+#
+#
+#
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#
+#
+#
+INSTRUCTIONS()::
+# EMITTING CLRSSBSY (CLRSSBSY-N/A-1)
+{
+ICLASS:      CLRSSBSY
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0xAE MOD[mm] MOD!=3 REG[0b110] RM[nnn]   f3_refining_prefix     MODRM()
+OPERANDS:    MEM0:w:q:u64
+IFORM:       CLRSSBSY_MEMu64
+}
+
+
+# EMITTING ENDBR32 (ENDBR32-N/A-1)
+{
+ICLASS:      ENDBR32
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x1E MOD[0b11] MOD=3  REG[0b111] RM[0b011]  f3_refining_prefix     CET=1
+OPERANDS:
+IFORM:       ENDBR32
+}
+
+
+# EMITTING ENDBR64 (ENDBR64-N/A-1)
+{
+ICLASS:      ENDBR64
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x1E MOD[0b11] MOD=3  REG[0b111] RM[0b010]  f3_refining_prefix     CET=1
+OPERANDS:
+IFORM:       ENDBR64
+}
+
+
+# EMITTING INCSSPD (INCSSPD-N/A-1)
+{
+ICLASS:      INCSSPD
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0xAE MOD[0b11] MOD=3  REG[0b101] RM[nnn]  f3_refining_prefix    W0
+OPERANDS:    REG0=GPR32_B():r:d:u8 REG1=XED_REG_SSP:rw:SUPP:u64
+IFORM:       INCSSPD_GPR32u8
+}
+
+
+# EMITTING INCSSPQ (INCSSPQ-N/A-1)
+{
+ICLASS:      INCSSPQ
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0xAE MOD[0b11] MOD=3  REG[0b101] RM[nnn]  f3_refining_prefix    W1  mode64
+OPERANDS:    REG0=GPR64_B():r:q:u8 REG1=XED_REG_SSP:rw:SUPP:u64
+IFORM:       INCSSPQ_GPR64u8
+}
+
+
+# EMITTING RDSSPD (RDSSPD-N/A-1)
+{
+ICLASS:      RDSSPD
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x1E MOD[0b11] MOD=3  REG[0b001] RM[nnn]  f3_refining_prefix    W0 CET=1
+OPERANDS:    REG0=GPR32_B():w:d:u32 REG1=XED_REG_SSP:r:SUPP:u64
+IFORM:       RDSSPD_GPR32u32
+}
+
+
+# EMITTING RDSSPQ (RDSSPQ-N/A-1)
+{
+ICLASS:      RDSSPQ
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x1E MOD[0b11] MOD=3  REG[0b001] RM[nnn]  f3_refining_prefix    W1  mode64 CET=1
+OPERANDS:    REG0=GPR64_B():w:q:u64 REG1=XED_REG_SSP:r:SUPP:u64
+IFORM:       RDSSPQ_GPR64u64
+}
+
+
+# EMITTING RSTORSSP (RSTORSSP-N/A-1)
+{
+ICLASS:      RSTORSSP
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x01 MOD[mm] MOD!=3 REG[0b101] RM[nnn]  MODRM()  f3_refining_prefix
+OPERANDS:    MEM0:rw:q:u64 REG0=XED_REG_SSP:w:SUPP:u64
+IFORM:       RSTORSSP_MEMu64
+}
+
+
+# EMITTING SAVESSP (SAVESSP-N/A-1)
+{
+ICLASS:      SAVESSP
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x01 MOD[0b11] MOD=3  REG[0b101] RM[0b010]  f3_refining_prefix
+OPERANDS:    REG0=XED_REG_SSP:r:SUPP:u64
+IFORM:       SAVESSP
+}
+
+
+# EMITTING SETSSBSY (SETSSBSY-N/A-1)
+{
+ICLASS:      SETSSBSY
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x01 MOD[0b11] MOD=3  REG[0b101] RM[0b000]  f3_refining_prefix
+OPERANDS:
+IFORM:       SETSSBSY
+}
+
+
+# EMITTING WRSSD (WRSSD-N/A-1)
+{
+ICLASS:      WRSSD
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x38 0xF6 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix    W0
+OPERANDS:    MEM0:w:d:u32 REG0=GPR32_R():r:d:u32
+IFORM:       WRSSD_MEMu32_GPR32u32
+}
+
+
+# EMITTING WRSSQ (WRSSQ-N/A-1)
+{
+ICLASS:      WRSSQ
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x38 0xF6 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix    W1  mode64
+OPERANDS:    MEM0:w:q:u64 REG0=GPR64_R():r:q:u64
+IFORM:       WRSSQ_MEMu64_GPR64u64
+}
+
+
+# EMITTING WRUSSD (WRUSSD-N/A-1)
+{
+ICLASS:      WRUSSD
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x38 0xF5 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  osz_refining_prefix    W0
+OPERANDS:    MEM0:w:d:u32 REG0=GPR32_R():r:d:u32
+IFORM:       WRUSSD_MEMu32_GPR32u32
+}
+
+
+# EMITTING WRUSSQ (WRUSSQ-N/A-1)
+{
+ICLASS:      WRUSSQ
+CPL:         3
+CATEGORY:    CET
+EXTENSION:   CET
+ISA_SET:     CET
+REAL_OPCODE: Y
+PATTERN:    0x0F 0x38 0xF5 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  osz_refining_prefix    W1  mode64
+OPERANDS:    MEM0:w:q:u64 REG0=GPR64_R():r:q:u64
+IFORM:       WRUSSQ_MEMu64_GPR64u64
+}
+
+
+
+
+###FILE: ./datafiles/sha/sha-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+#
+#
+#
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#
+#
+#
+INSTRUCTIONS()::
+# EMITTING SHA1MSG1 (SHA1MSG1-N/A-1)
+{
+ICLASS:      SHA1MSG1
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+PATTERN:     0x0F 0x38 0xC9 MOD[0b11] MOD=3  REG[rrr] RM[nnn]  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 REG1=XMM_B():r:dq:i32
+IFORM:       SHA1MSG1_XMMi32_XMMi32_SHA
+}
+
+{
+ICLASS:      SHA1MSG1
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+ATTRIBUTES:  REQUIRES_ALIGNMENT
+PATTERN:     0x0F 0x38 0xC9 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 MEM0:r:dq:i32
+IFORM:       SHA1MSG1_XMMi32_MEMi32_SHA
+}
+
+
+# EMITTING SHA1MSG2 (SHA1MSG2-N/A-1)
+{
+ICLASS:      SHA1MSG2
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+PATTERN:     0x0F 0x38 0xCA MOD[0b11] MOD=3  REG[rrr] RM[nnn]  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 REG1=XMM_B():r:dq:i32
+IFORM:       SHA1MSG2_XMMi32_XMMi32_SHA
+}
+
+{
+ICLASS:      SHA1MSG2
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+ATTRIBUTES:  REQUIRES_ALIGNMENT
+PATTERN:     0x0F 0x38 0xCA MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 MEM0:r:dq:i32
+IFORM:       SHA1MSG2_XMMi32_MEMi32_SHA
+}
+
+
+# EMITTING SHA1NEXTE (SHA1NEXTE-N/A-1)
+{
+ICLASS:      SHA1NEXTE
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+PATTERN:     0x0F 0x38 0xC8 MOD[0b11] MOD=3  REG[rrr] RM[nnn]  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 REG1=XMM_B():r:dq:i32
+IFORM:       SHA1NEXTE_XMMi32_XMMi32_SHA
+}
+
+{
+ICLASS:      SHA1NEXTE
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+ATTRIBUTES:  REQUIRES_ALIGNMENT
+PATTERN:     0x0F 0x38 0xC8 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 MEM0:r:dq:i32
+IFORM:       SHA1NEXTE_XMMi32_MEMi32_SHA
+}
+
+
+# EMITTING SHA1RNDS4 (SHA1RNDS4-N/A-1)
+{
+ICLASS:      SHA1RNDS4
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+PATTERN:     0x0F 0x3A 0xCC MOD[0b11] MOD=3  REG[rrr] RM[nnn]  no_refining_prefix     UIMM8()
+OPERANDS:    REG0=XMM_R():rw:dq:i32 REG1=XMM_B():r:dq:i32 IMM0:r:b
+IFORM:       SHA1RNDS4_XMMi32_XMMi32_IMM8_SHA
+}
+
+{
+ICLASS:      SHA1RNDS4
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+ATTRIBUTES:  REQUIRES_ALIGNMENT
+PATTERN:     0x0F 0x3A 0xCC MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix     UIMM8()
+OPERANDS:    REG0=XMM_R():rw:dq:i32 MEM0:r:dq:i32 IMM0:r:b
+IFORM:       SHA1RNDS4_XMMi32_MEMi32_IMM8_SHA
+}
+
+
+# EMITTING SHA256MSG1 (SHA256MSG1-N/A-1)
+{
+ICLASS:      SHA256MSG1
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+PATTERN:     0x0F 0x38 0xCC MOD[0b11] MOD=3  REG[rrr] RM[nnn]  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 REG1=XMM_B():r:dq:i32
+IFORM:       SHA256MSG1_XMMi32_XMMi32_SHA
+}
+
+{
+ICLASS:      SHA256MSG1
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+ATTRIBUTES:  REQUIRES_ALIGNMENT
+PATTERN:     0x0F 0x38 0xCC MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 MEM0:r:dq:i32
+IFORM:       SHA256MSG1_XMMi32_MEMi32_SHA
+}
+
+
+# EMITTING SHA256MSG2 (SHA256MSG2-N/A-1)
+{
+ICLASS:      SHA256MSG2
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+PATTERN:     0x0F 0x38 0xCD MOD[0b11] MOD=3  REG[rrr] RM[nnn]  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 REG1=XMM_B():r:dq:i32
+IFORM:       SHA256MSG2_XMMi32_XMMi32_SHA
+}
+
+{
+ICLASS:      SHA256MSG2
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+ATTRIBUTES:  REQUIRES_ALIGNMENT
+PATTERN:     0x0F 0x38 0xCD MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 MEM0:r:dq:i32
+IFORM:       SHA256MSG2_XMMi32_MEMi32_SHA
+}
+
+
+# EMITTING SHA256RNDS2 (SHA256RNDS2-N/A-1)
+{
+ICLASS:      SHA256RNDS2
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+PATTERN:     0x0F 0x38 0xCB MOD[0b11] MOD=3  REG[rrr] RM[nnn]  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 REG1=XMM_B():r:dq:i32 REG2=XED_REG_XMM0:r:SUPP:dq:u8
+IFORM:       SHA256RNDS2_XMMi32_XMMi32_SHA
+}
+
+{
+ICLASS:      SHA256RNDS2
+CPL:         3
+CATEGORY:    SHA
+EXTENSION:   SHA
+ISA_SET:     SHA
+EXCEPTIONS:     SSE_TYPE_4
+REAL_OPCODE: Y
+ATTRIBUTES:  REQUIRES_ALIGNMENT
+PATTERN:     0x0F 0x38 0xCB MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  no_refining_prefix
+OPERANDS:    REG0=XMM_R():rw:dq:i32 MEM0:r:dq:i32 REG1=XED_REG_XMM0:r:SUPP:dq:u8
+IFORM:       SHA256RNDS2_XMMi32_MEMi32_SHA
+}
+
+
+
+
+###FILE: ./datafiles/ivbint/ivb-int-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : RDRAND
+CPL       : 3
+CATEGORY  : RDRAND
+EXTENSION : RDRAND
+ISA_SET   : RDRAND
+FLAGS     : MUST [ cf-mod zf-0 of-0 af-0 pf-0 sf-0 ]
+PATTERN   : 0x0F 0xC7  MOD[0b11] MOD=3 REG[0b110] RM[nnn] not_refining
+OPERANDS  : REG0=GPRv_B():w
+}
+
+
+
+###FILE: ./datafiles/ivbint/fsgsbase-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+
+{
+ICLASS    : RDFSBASE
+CPL       : 3
+CATEGORY  : RDWRFSGS
+EXTENSION : RDWRFSGS
+
+PATTERN   : 0x0F 0xAE MOD[0b11] MOD=3 REG[0b000] RM[nnn] mode64 f3_refining_prefix
+OPERANDS  : REG0=GPRy_B():w  REG1=XED_REG_FSBASE:r:SUPP:y
+
+}
+{
+ICLASS    : RDGSBASE
+CPL       : 3
+CATEGORY  : RDWRFSGS
+EXTENSION : RDWRFSGS
+
+PATTERN   : 0x0F 0xAE MOD[0b11] MOD=3 REG[0b001] RM[nnn] mode64 f3_refining_prefix
+OPERANDS  : REG0=GPRy_B():w  REG1=XED_REG_GSBASE:r:SUPP:y
+
+}
+
+
+
+{
+ICLASS    : WRFSBASE
+CPL       : 3
+CATEGORY  : RDWRFSGS
+EXTENSION : RDWRFSGS
+ATTRIBUTES: NOTSX
+
+PATTERN   : 0x0F 0xAE MOD[0b11] MOD=3 REG[0b010] RM[nnn] mode64 f3_refining_prefix
+OPERANDS  :   REG0=GPRy_B():r   REG1=XED_REG_FSBASE:w:SUPP:y
+
+}
+{
+ICLASS    : WRGSBASE
+CPL       : 3
+CATEGORY  : RDWRFSGS
+EXTENSION : RDWRFSGS
+ATTRIBUTES: NOTSX
+
+PATTERN   : 0x0F 0xAE MOD[0b11] MOD=3 REG[0b011] RM[nnn] mode64 f3_refining_prefix
+OPERANDS  :   REG0=GPRy_B():r   REG1=XED_REG_GSBASE:w:SUPP:y
+
+}
+
+
+###FILE: ./datafiles/xsaves/xsaves-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : XSAVES
+CPL       : 0
+CATEGORY  : XSAVE
+EXTENSION : XSAVES
+COMMENT   : variable length load and conditional reg write
+ATTRIBUTES : xmm_state_r REQUIRES_ALIGNMENT x87_mmx_state_r NOTSX SPECIAL_AGEN_REQUIRED
+PATTERN   : 0x0F 0xC7 MOD[mm]  MOD!=3 REG[0b101] RM[nnn] MODRM() norexw_prefix no_refining_prefix
+OPERANDS  : MEM0:w:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+{
+ICLASS    : XSAVES64
+CPL       : 0
+CATEGORY  : XSAVE
+EXTENSION : XSAVES
+COMMENT   : variable length load and conditional reg write
+ATTRIBUTES : xmm_state_r REQUIRES_ALIGNMENT x87_mmx_state_r NOTSX SPECIAL_AGEN_REQUIRED
+PATTERN   : 0x0F 0xC7 MOD[mm]  MOD!=3 REG[0b101] RM[nnn] MODRM() rexw_prefix no_refining_prefix
+OPERANDS  : MEM0:w:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+
+
+
+{
+ICLASS    : XRSTORS
+CPL       : 0
+CATEGORY  : XSAVE
+EXTENSION : XSAVES
+COMMENT   : variable length load and conditional reg write
+ATTRIBUTES : xmm_state_w REQUIRES_ALIGNMENT x87_mmx_state_w NOTSX SPECIAL_AGEN_REQUIRED
+PATTERN   : 0x0F 0xC7 MOD[mm]  MOD!=3 REG[0b011] RM[nnn] MODRM() norexw_prefix no_refining_prefix
+OPERANDS  : MEM0:r:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+{
+ICLASS    : XRSTORS64
+CPL       : 0
+CATEGORY  : XSAVE
+EXTENSION : XSAVES
+COMMENT   : variable length load and conditional reg write
+ATTRIBUTES : xmm_state_w REQUIRES_ALIGNMENT x87_mmx_state_w NOTSX SPECIAL_AGEN_REQUIRED
+PATTERN   : 0x0F 0xC7 MOD[mm]  MOD!=3 REG[0b011] RM[nnn] MODRM() rexw_prefix no_refining_prefix
+OPERANDS  : MEM0:r:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+
+###FILE: ./datafiles/xsavec/xsavec-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : XSAVEC
+CPL       : 3
+CATEGORY  : XSAVE
+EXTENSION : XSAVEC
+COMMENT   : variable length store
+ATTRIBUTES : xmm_state_r REQUIRES_ALIGNMENT x87_mmx_state_r NOTSX SPECIAL_AGEN_REQUIRED
+PATTERN   : 0x0F 0xC7 MOD[mm]  MOD!=3 REG[0b100] RM[nnn] MODRM() norexw_prefix no_refining_prefix
+OPERANDS  : MEM0:w:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+
+{
+ICLASS    : XSAVEC64
+CPL       : 3
+CATEGORY  : XSAVE
+EXTENSION : XSAVEC
+COMMENT   : variable length store
+ATTRIBUTES : xmm_state_r REQUIRES_ALIGNMENT x87_mmx_state_r  NOTSX SPECIAL_AGEN_REQUIRED
+PATTERN   : 0x0F 0xC7 MOD[mm]  MOD!=3 REG[0b100] RM[nnn] MODRM() rexw_prefix no_refining_prefix
+OPERANDS  : MEM0:w:mxsave REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_XCR0:r:SUPP
+}
+
+
+
+
+###FILE: ./datafiles/avx/avx-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+# The neat thing is we can just end a nonterminal by starting a new one.
+
+AVX_INSTRUCTIONS()::
+{
+ICLASS    : VADDPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x58  V66 VL128 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x58  V66 VL128 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x58  V66 VL256 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x58  V66 VL256 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VADDPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x58  VNP VL128 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x58  VNP VL128 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x58  VNP VL256 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x58  VNP VL256 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+{
+ICLASS    : VADDSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+ATTRIBUTES : simd_scalar MXCSR
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x58  VF2  V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x58  VF2  V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64
+}
+
+{
+ICLASS    : VADDSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+ATTRIBUTES : simd_scalar MXCSR
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x58  VF3  V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x58  VF3  V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+
+{
+ICLASS    : VADDSUBPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0xD0  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0xD0  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0xD0  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0xD0  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+{
+ICLASS    : VADDSUBPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0xD0  VL128 VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0xD0  VL128 VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0xD0  VL256 VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0xD0  VL256 VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+{
+ICLASS    : VANDPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x54  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x54  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+
+PATTERN : VV1 0x54  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x54  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=YMM_B():r:qq:u64
+}
+
+
+
+{
+ICLASS    : VANDPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x54  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x54  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x54  VL256 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x54  VL256 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+}
+
+
+{
+ICLASS    : VANDNPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x55  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x55  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+
+PATTERN : VV1 0x55  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x55  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=YMM_B():r:qq:u64
+}
+
+
+
+{
+ICLASS    : VANDNPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x55  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x55  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x55  VL256 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x55  VL256 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+}
+
+
+
+{
+ICLASS    : VBLENDPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x0D  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x0D  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x0D  VL256 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0x0D  VL256 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 IMM0:r:b
+}
+
+
+{
+ICLASS    : VBLENDPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x0C  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x0C  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x0C  VL256 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0x0C  VL256 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 IMM0:r:b
+}
+
+
+
+
+
+
+{
+ICLASS    : VCMPPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0xC2  V66 VL128 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0xC2  V66 VL128 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0xC2  V66 VL256 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0xC2  V66 VL256 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 IMM0:r:b
+}
+
+
+
+{
+ICLASS    : VCMPPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0xC2  VNP VL128 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0xC2  VNP VL128 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0xC2  VNP VL256 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0xC2  VNP VL256 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 IMM0:r:b
+}
+
+
+
+{
+ICLASS    : VCMPSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+PATTERN : VV1 0xC2   VF2 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64 IMM0:r:b
+
+PATTERN : VV1 0xC2   VF2 V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64 IMM0:r:b
+}
+
+
+
+{
+ICLASS    : VCMPSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0xC2   VF3 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32 IMM0:r:b
+
+PATTERN : VV1 0xC2   VF3 V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32 IMM0:r:b
+}
+
+
+{
+ICLASS    : VCOMISD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+FLAGS     : MUST [ zf-mod pf-mod cf-mod of-0 af-0 sf-0 ]
+PATTERN : VV1 0x2F   V66 V0F  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():r:q:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x2F   V66 V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:q:f64 REG1=XMM_B():r:q:f64
+}
+
+{
+ICLASS    : VCOMISS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+FLAGS     : MUST [ zf-mod pf-mod cf-mod of-0 af-0 sf-0 ]
+PATTERN : VV1 0x2F   VNP V0F  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():r:d:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x2F   VNP V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:d:f32 REG1=XMM_B():r:d:f32
+}
+
+
+{
+ICLASS    : VCVTDQ2PD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0xE6  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 MEM0:r:q:i32
+
+PATTERN : VV1 0xE6  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_B():r:q:i32
+
+PATTERN : VV1 0xE6  VL256 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 MEM0:r:dq:i32
+
+PATTERN : VV1 0xE6  VL256 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VCVTDQ2PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5B  VL128 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x5B  VL128 VNP V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_B():r:dq:i32
+
+PATTERN : VV1 0x5B  VL256 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x5B  VL256 VNP V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_B():r:qq:i32
+}
+
+{
+ICLASS    : VCVTPD2DQ
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0xE6  VL128 VF2 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 MEM0:r:dq:f64
+
+PATTERN : VV1 0xE6  VL128 VF2 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_B():r:dq:f64
+
+PATTERN : VV1 0xE6  VL256 VF2 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 MEM0:r:qq:f64
+
+PATTERN : VV1 0xE6  VL256 VF2 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VCVTTPD2DQ
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0xE6  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 MEM0:r:dq:f64
+
+PATTERN : VV1 0xE6  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_B():r:dq:f64
+
+PATTERN : VV1 0xE6  VL256 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 MEM0:r:qq:f64
+
+PATTERN : VV1 0xE6  VL256 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VCVTPD2PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5A  V66 VL128 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:dq:f64
+
+PATTERN : VV1 0x5A  V66 VL128 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x5A  V66 VL256 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:qq:f64
+
+PATTERN : VV1 0x5A  V66 VL256 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=YMM_B():r:qq:f64
+}
+
+{
+ICLASS    : VCVTPS2DQ
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5B  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x5B  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x5B  VL256 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x5B  VL256 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_B():r:qq:f32
+}
+
+{
+ICLASS    : VCVTTPS2DQ
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5B  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x5B  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x5B  VL256 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x5B  VL256 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_B():r:qq:f32
+}
+
+{
+ICLASS    : VCVTPS2PD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5A  VNP VL128 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 MEM0:r:q:f32
+
+PATTERN : VV1 0x5A  VNP VL128 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_B():r:q:f32
+
+PATTERN : VV1 0x5A  VNP VL256 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 MEM0:r:dq:f32
+
+PATTERN : VV1 0x5A  VNP VL256 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=XMM_B():r:dq:f32
+}
+
+
+
+
+{
+ICLASS    : VCVTSD2SI
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+COMMENT   : SNB/IVB/HSW require VEX.L=128. Later processors are LIG
+
+PATTERN : VV1 0x2D   VF2 V0F  NOVSR not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:q:f64
+
+PATTERN : VV1 0x2D   VF2 V0F  NOVSR not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:q:f64
+
+
+PATTERN : VV1 0x2D   VF2 V0F  NOVSR mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:q:f64
+
+PATTERN : VV1 0x2D   VF2 V0F  NOVSR mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:q:f64
+
+
+
+PATTERN : VV1 0x2D   VF2 V0F  NOVSR mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR64_R():w:q:i64 MEM0:r:q:f64
+
+PATTERN : VV1 0x2D   VF2 V0F  NOVSR mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR64_R():w:q:i64 REG1=XMM_B():r:q:f64
+}
+
+{
+ICLASS    : VCVTTSD2SI
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+COMMENT   : SNB/IVB/HSW require VEX.L=128. Later processors are LIG
+
+
+PATTERN : VV1 0x2C   VF2 V0F  NOVSR not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:q:f64
+
+PATTERN : VV1 0x2C   VF2 V0F  NOVSR not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:q:f64
+
+
+
+PATTERN : VV1 0x2C   VF2 V0F  NOVSR mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:q:f64
+
+PATTERN : VV1 0x2C   VF2 V0F  NOVSR mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:q:f64
+
+
+
+PATTERN : VV1 0x2C   VF2 V0F  NOVSR mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR64_R():w:q:i64 MEM0:r:q:f64
+
+PATTERN : VV1 0x2C   VF2 V0F  NOVSR mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR64_R():w:q:i64 REG1=XMM_B():r:q:f64
+}
+
+
+
+
+{
+ICLASS    : VCVTSS2SI
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+COMMENT   : SNB/IVB/HSW require VEX.L=128. Later processors are LIG
+
+PATTERN : VV1 0x2D   VF3 V0F  NOVSR not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:d:f32
+
+PATTERN : VV1 0x2D   VF3 V0F  NOVSR not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:d:f32
+
+
+
+PATTERN : VV1 0x2D   VF3 V0F  NOVSR mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:d:f32
+
+PATTERN : VV1 0x2D   VF3 V0F  NOVSR mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:d:f32
+
+
+PATTERN : VV1 0x2D   VF3 V0F  NOVSR mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR64_R():w:q:i64 MEM0:r:d:f32
+
+PATTERN : VV1 0x2D   VF3 V0F  NOVSR mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR64_R():w:q:i64 REG1=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VCVTTSS2SI
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+COMMENT   : SNB/IVB/HSW require VEX.L=128. Later processors are LIG
+
+PATTERN : VV1 0x2C   VF3 V0F  NOVSR not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:d:f32
+
+PATTERN : VV1 0x2C   VF3 V0F  NOVSR not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:d:f32
+
+
+
+PATTERN : VV1 0x2C   VF3 V0F  NOVSR mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR32_R():w:d:i32 MEM0:r:d:f32
+
+PATTERN : VV1 0x2C   VF3 V0F  NOVSR mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:i32 REG1=XMM_B():r:d:f32
+
+
+
+
+PATTERN : VV1 0x2C   VF3 V0F  NOVSR mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPR64_R():w:q:i64 MEM0:r:d:f32
+
+PATTERN : VV1 0x2C   VF3 V0F  NOVSR mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR64_R():w:q:i64 REG1=XMM_B():r:d:f32
+}
+
+
+
+
+{
+ICLASS    : VCVTSD2SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5A  VF2 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:q:f64
+
+PATTERN : VV1 0x5A  VF2 V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:q:f64
+
+}
+
+
+{
+ICLASS    : VCVTSI2SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x2A  VF2 V0F not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:d:i32
+
+PATTERN : VV1 0x2A  VF2 V0F not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=GPR32_B():r:d:i32
+
+
+
+PATTERN : VV1 0x2A  VF2 V0F mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:d:i32
+
+PATTERN : VV1 0x2A  VF2 V0F mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=GPR32_B():r:d:i32
+
+
+
+PATTERN : VV1 0x2A  VF2 V0F mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:i64
+
+PATTERN : VV1 0x2A  VF2 V0F mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=GPR64_B():r:q:i64
+}
+
+
+{
+ICLASS    : VCVTSI2SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x2A   VF3 V0F not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:i32
+
+PATTERN : VV1 0x2A   VF3 V0F not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=GPR32_B():r:d:i32
+
+
+
+PATTERN : VV1 0x2A   VF3 V0F mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:i32
+
+PATTERN : VV1 0x2A   VF3 V0F mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=GPR32_B():r:d:i32
+
+
+
+PATTERN : VV1 0x2A   VF3 V0F mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:q:i64
+
+PATTERN : VV1 0x2A   VF3 V0F mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=GPR64_B():r:q:i64
+}
+
+
+{
+ICLASS    : VCVTSS2SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5A  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:d:f32
+
+PATTERN : VV1 0x5A  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:d:f32
+}
+
+
+{
+ICLASS    : VDIVPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5E  V66 V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x5E  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x5E  V66 V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x5E  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VDIVPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5E  VNP V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x5E  VNP V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x5E  VNP V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x5E  VNP V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+
+{
+ICLASS    : VDIVSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5E  VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x5E  VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64
+}
+
+{
+ICLASS    : VDIVSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5E  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x5E  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+
+{
+ICLASS    : VEXTRACTF128
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x19  norexw_prefix VL256 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:dq:f64 REG0=YMM_R():r:dq:f64  IMM0:r:b
+
+PATTERN : VV1 0x19  norexw_prefix VL256 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_B():w:dq:f64 REG1=YMM_R():r:dq:f64 IMM0:r:b
+}
+
+
+
+{
+ICLASS    : VDPPD
+EXCEPTIONS: avx-type-2D
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x41  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x41  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 IMM0:r:b
+}
+
+{
+ICLASS    : VDPPS
+EXCEPTIONS: avx-type-2D
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x40  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x40  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x40  VL256 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0x40  VL256 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 IMM0:r:b
+}
+
+
+{
+ICLASS    : VEXTRACTPS
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x17  VL128 V66 V0F3A  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:d:f32  REG0=XMM_R():r:dq:f32  IMM0:r:b
+
+PATTERN : VV1 0x17  VL128 V66 V0F3A  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=GPR32_B():w  REG1=XMM_R():r:dq:f32  IMM0:r:b
+}
+
+
+{
+ICLASS    : VZEROALL
+EXCEPTIONS: avx-type-8
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : xmm_state_w
+
+PATTERN : VV1 0x77 VNP  V0F VL256  NOVSR
+OPERANDS:
+
+}
+
+# FIXME: how to denote partial upper clobber!
+{
+ICLASS    : VZEROUPPER
+EXCEPTIONS: avx-type-8
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : xmm_state_w NOTSX  # FIXME: should be ymm_state_w?
+
+PATTERN : VV1 0x77 VNP  V0F VL128 NOVSR
+OPERANDS:
+}
+
+
+{
+ICLASS    : VHADDPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x7C  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x7C  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x7C  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x7C  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VHADDPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x7C  VL128 VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x7C  VL128 VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x7C  VL256 VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x7C  VL256 VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+{
+ICLASS    : VHSUBPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x7D  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x7D  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x7D  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x7D  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VHSUBPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x7D  VL128 VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x7D  VL128 VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x7D  VL256 VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x7D  VL256 VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+
+{
+ICLASS    : VPERMILPD
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+# 2008-02-01 moved norexw_prefix to after V0F38 to avoid graph build conflict with VBLENDPD
+PATTERN : VV1 0x0D VL128 V66 V0F38 norexw_prefix  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x0D  VL128 V66 V0F38 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:u64
+
+PATTERN : VV1 0x0D  VL256 V66 V0F38 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x0D  VL256 V66 V0F38 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:u64
+
+########################################
+# IMMEDIATE FORM
+########################################
+
+# 2008-02-01 moved norexw_prefix to after V0F3A to avoid a graph build conflict with VPHSUBW
+PATTERN : VV1 0x05  VL128 V66 V0F3A norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 MEM0:r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x05  VL128 V66 V0F3A norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_B():r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x05  VL256 V66 V0F3A norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 MEM0:r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0x05  VL256 V66 V0F3A norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_B():r:qq:f64 IMM0:r:b
+}
+
+
+{
+ICLASS    : VPERMILPS
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+# moved norexw_prefix to after V0F38 to avoid graph build conflict with VBLENDPS
+PATTERN : VV1 0x0C VL128 V66 V0F38 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:u32
+
+PATTERN : VV1 0x0C  VL128 V66 V0F38 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:u32
+
+PATTERN : VV1 0x0C  VL256 V66 V0F38  norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:u32
+
+PATTERN : VV1 0x0C  VL256 V66 V0F38  norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:u32
+
+########################################
+# IMMEDIATE FORM
+########################################
+
+# 2008-02-01 moved norexw_prefix to after V0F3A to avoid graph build conflict with VPMADDUBSW
+PATTERN : VV1 0x04 VL128 V66 V0F3A norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x04 VL128 V66 V0F3A norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_B():r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x04 VL256 V66 V0F3A norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 MEM0:r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0x04 VL256 V66 V0F3A norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_B():r:qq:f32 IMM0:r:b
+}
+
+
+{
+ICLASS    : VPERM2F128
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+
+# 2008-02-01 moved norexw_prefix to after V0F3A to avoid graph build conflict with VPHSUBD
+PATTERN : VV1 0x06 VL256 V66 V0F3A norexw_prefix  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0x06 VL256 V66 V0F3A norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 IMM0:r:b
+}
+
+
+
+{
+ICLASS    : VBROADCASTSS
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX
+PATTERN : VV1 0x18  norexw_prefix VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:d:f32 EMX_BROADCAST_1TO4_32
+
+PATTERN : VV1 0x18  norexw_prefix VL256 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 MEM0:r:d:f32 EMX_BROADCAST_1TO8_32
+}
+{
+ICLASS    : VBROADCASTSD
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX
+PATTERN : VV1 0x19  norexw_prefix VL256 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 MEM0:r:q:f64 EMX_BROADCAST_1TO4_64
+}
+
+{
+ICLASS    : VBROADCASTF128
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX
+COMMENT : There is no F128 type. I just set these to f64 for lack of anything better.
+PATTERN : VV1 0x1A norexw_prefix VL256 V66 V0F38 NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 MEM0:r:dq:f64 EMX_BROADCAST_2TO4_64
+}
+
+
+{
+ICLASS    : VINSERTF128
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x18  norexw_prefix VL256 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:dq:f64 IMM0:r:b EMX_BROADCAST_2TO4_64
+
+PATTERN : VV1 0x18  norexw_prefix  VL256 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=XMM_B():r:dq:f64 IMM0:r:b EMX_BROADCAST_2TO4_64
+}
+
+{
+ICLASS    : VINSERTPS
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x21  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32 IMM0:r:b
+
+PATTERN : VV1 0x21  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 IMM0:r:b
+}
+
+
+
+
+
+{
+ICLASS    : VLDDQU
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF0  VL128 VF2 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:dq
+
+PATTERN : VV1 0xF0  VL256 VF2 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq MEM0:r:qq
+}
+
+
+
+
+
+
+{
+ICLASS    : VMASKMOVPS
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : maskop NONTEMPORAL
+# load  forms
+PATTERN : VV1 0x2C V66 VL128 V0F38 norexw_prefix  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32   REG1=XMM_N():r:dq MEM0:r:dq:f32
+
+PATTERN : VV1 0x2C V66 VL256 V0F38    norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32   REG1=YMM_N():r:qq MEM0:r:qq:f32
+
+# store forms
+PATTERN : VV1 0x2E V66 V0F38 VL128  norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq:f32  REG0=XMM_N():r:dq   REG1=XMM_R():r:dq:f32
+
+PATTERN : VV1 0x2E V66 V0F38 VL256 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq:f32   REG0=YMM_N():r:qq  REG1=YMM_R():r:qq:f32
+}
+
+{
+ICLASS    : VMASKMOVPD
+EXCEPTIONS: avx-type-6
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : maskop
+# load forms
+PATTERN : VV1 0x2D  V66 VL128 V0F38  norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64   REG1=XMM_N():r:dq:u64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x2D  V66 VL256 V0F38 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64   REG1=YMM_N():r:qq:u64 MEM0:r:qq:f64
+
+# store forms
+PATTERN : VV1 0x2F   V66 V0F38 VL128 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq:f64  REG0=XMM_N():r:dq:u64  REG1=XMM_R():r:dq:f64
+
+PATTERN : VV1 0x2F   V66 V0F38 VL256 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq:f64  REG0=YMM_N():r:qq:u64   REG1=YMM_R():r:qq:f64
+}
+
+{
+ICLASS    : VPTEST
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX
+FLAGS     : MUST [ zf-mod cf-mod ]
+PATTERN : VV1 0x17  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x17  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:dq REG1=XMM_B():r:dq
+
+PATTERN : VV1 0x17  VL256 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x17  VL256 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():r:qq REG1=YMM_B():r:qq
+}
+
+{
+ICLASS    : VTESTPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+FLAGS     : MUST [ zf-mod cf-mod ]
+PATTERN : VV1 0x0E VL128 V66 V0F38 norexw_prefix  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x0E  VL128 V66 V0F38 norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:dq:f32 REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x0E VL256 V66 V0F38  norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x0E VL256 V66 V0F38 norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():r:qq:f32 REG1=YMM_B():r:qq:f32
+}
+
+{
+ICLASS    : VTESTPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+FLAGS     : MUST [ zf-mod cf-mod ]
+PATTERN : VV1 0x0F  VL128 V66 V0F38 norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x0F VL128 V66 V0F38 norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:dq:f64 REG1=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x0F VL256 V66 V0F38  norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x0F VL256 V66 V0F38 norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():r:qq:f64 REG1=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VMAXPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5F  V66 V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x5F  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x5F  V66 V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x5F  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+{
+ICLASS    : VMAXPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5F  VNP V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x5F  VNP V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x5F  VNP V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x5F  VNP V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+
+{
+ICLASS    : VMAXSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5F  VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x5F  VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64
+}
+
+{
+ICLASS    : VMAXSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5F  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x5F  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VMINPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5D  V66 V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x5D  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x5D  V66 V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x5D  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+{
+ICLASS    : VMINPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES:  MXCSR
+PATTERN : VV1 0x5D  VNP V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x5D  VNP V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x5D  VNP V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x5D  VNP V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+
+{
+ICLASS    : VMINSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5D  VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x5D  VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64
+}
+
+{
+ICLASS    : VMINSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+PATTERN : VV1 0x5D  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x5D  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+
+{
+ICLASS    : VMOVAPD
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT
+
+# 128b load
+
+PATTERN : VV1 0x28  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64  MEM0:r:dq:f64
+
+PATTERN : VV1 0x28  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64  REG1=XMM_B():r:dq:f64
+IFORM     : VMOVAPD_XMMdq_XMMdq_28
+
+# 128b store
+
+PATTERN : VV1 0x29  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq:f64 REG0=XMM_R():r:dq:f64
+
+PATTERN : VV1 0x29  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  :  REG0=XMM_B():w:dq:f64 REG1=XMM_R():r:dq:f64
+IFORM     : VMOVAPD_XMMdq_XMMdq_29
+
+# 256b load
+
+PATTERN : VV1 0x28  VL256 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64  MEM0:r:qq:f64
+
+PATTERN : VV1 0x28  VL256 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64  REG1=YMM_B():r:qq:f64
+IFORM     : VMOVAPD_YMMqq_YMMqq_28
+
+# 256b store
+
+PATTERN : VV1 0x29  VL256 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq:f64 REG0=YMM_R():r:qq:f64
+
+PATTERN : VV1 0x29  VL256 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  :  REG0=YMM_B():w:qq:f64 REG1=YMM_R():r:qq:f64
+IFORM     : VMOVAPD_YMMqq_YMMqq_29
+}
+
+
+
+{
+ICLASS    : VMOVAPS
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT
+
+# 128b load
+
+PATTERN : VV1 0x28  VL128 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32  MEM0:r:dq:f32
+
+PATTERN : VV1 0x28  VL128 VNP V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_B():r:dq:f32
+IFORM     : VMOVAPS_XMMdq_XMMdq_28
+# 128b store
+
+PATTERN : VV1 0x29  VL128 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq:f32 REG0=XMM_R():r:dq:f32
+
+PATTERN : VV1 0x29  VL128 VNP V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  :  REG0=XMM_B():w:dq:f32 REG1=XMM_R():r:dq:f32
+IFORM     : VMOVAPS_XMMdq_XMMdq_29
+
+# 256b load
+
+PATTERN : VV1 0x28  VL256 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32  MEM0:r:qq:f32
+
+PATTERN : VV1 0x28  VL256 VNP V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32  REG1=YMM_B():r:qq:f32
+IFORM     : VMOVAPS_YMMqq_YMMqq_28
+
+# 256b store
+
+PATTERN : VV1 0x29  VL256 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq:f32 REG0=YMM_R():r:qq:f32
+
+PATTERN : VV1 0x29  VL256 VNP V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  :  REG0=YMM_B():w:qq:f32 REG1=YMM_R():r:qq:f32
+IFORM     : VMOVAPS_YMMqq_YMMqq_29
+}
+
+
+
+{
+ICLASS    : VMOVD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+# 32b load
+PATTERN : VV1 0x6E  VL128 V66 V0F not64 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:d
+
+PATTERN : VV1 0x6E  VL128 V66 V0F not64  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq  REG1=GPR32_B():r:d
+
+# 32b store
+PATTERN : VV1 0x7E  VL128 V66 V0F not64  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:d           REG0=XMM_R():r:d
+
+PATTERN : VV1 0x7E  VL128 V66 V0F not64  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_B():w:d REG1=XMM_R():r:d
+
+
+
+# 32b load
+PATTERN : VV1 0x6E  VL128 V66 V0F mode64 norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:d
+
+PATTERN : VV1 0x6E  VL128 V66 V0F mode64 norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq  REG1=GPR32_B():r:d
+
+# 32b store
+PATTERN : VV1 0x7E  VL128 V66 V0F mode64 norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:d           REG0=XMM_R():r:d
+
+PATTERN : VV1 0x7E  VL128 V66 V0F mode64 norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_B():w:d REG1=XMM_R():r:d
+
+
+}
+
+{
+ICLASS    : VMOVQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+# 64b load
+PATTERN : VV1 0x6E  VL128 V66 V0F mode64 rexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:q
+IFORM     : VMOVQ_XMMdq_MEMq_6E
+
+PATTERN : VV1 0x6E  VL128 V66 V0F mode64 rexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq  REG1=GPR64_B():r:q
+
+# 64b store
+PATTERN : VV1 0x7E  VL128 V66 V0F mode64 rexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:q           REG0=XMM_R():r:q
+IFORM     : VMOVQ_MEMq_XMMq_7E
+
+PATTERN : VV1 0x7E  VL128 V66 V0F mode64 rexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR64_B():w:q REG1=XMM_R():r:q
+
+
+# 2nd page of MOVQ forms
+PATTERN : VV1 0x7E  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq   MEM0:r:q
+IFORM     : VMOVQ_XMMdq_MEMq_7E
+
+PATTERN : VV1 0x7E  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq   REG1=XMM_B():r:q
+IFORM     : VMOVQ_XMMdq_XMMq_7E
+
+PATTERN : VV1 0xD6  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:q   REG0=XMM_R():r:q
+IFORM     : VMOVQ_MEMq_XMMq_D6
+
+PATTERN : VV1 0xD6  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_B():w:dq  REG1=XMM_R():r:q
+IFORM     : VMOVQ_XMMdq_XMMq_D6
+
+}
+
+
+
+
+{
+ICLASS    : VMOVDDUP
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+PATTERN : VV1 0x12  VL128 VF2 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64  MEM0:r:q:f64
+
+PATTERN : VV1 0x12  VL128 VF2 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64  REG1=XMM_B():r:dq:f64
+
+
+PATTERN : VV1 0x12  VL256 VF2 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64  MEM0:r:qq:f64
+
+PATTERN : VV1 0x12  VL256 VF2 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64  REG1=YMM_B():r:qq:f64
+}
+
+
+
+{
+ICLASS    : VMOVDQA
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT
+
+# LOAD XMM
+
+PATTERN : VV1 0x6F  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:dq
+
+PATTERN : VV1 0x6F  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq  REG1=XMM_B():r:dq
+IFORM     : VMOVDQA_XMMdq_XMMdq_6F
+
+# STORE XMM
+
+PATTERN : VV1 0x7F  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq REG0=XMM_R():r:dq
+
+PATTERN : VV1 0x7F  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_B():w:dq REG1=XMM_R():r:dq
+IFORM     : VMOVDQA_XMMdq_XMMdq_7F
+
+# LOAD YMM
+
+PATTERN : VV1 0x6F  VL256 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq  MEM0:r:qq
+
+PATTERN : VV1 0x6F  VL256 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq  REG1=YMM_B():r:qq
+IFORM     : VMOVDQA_YMMqq_YMMqq_6F
+
+
+# STORE YMM
+
+PATTERN : VV1 0x7F  VL256 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq REG0=YMM_R():r:qq
+
+PATTERN : VV1 0x7F  VL256 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_B():w:qq REG1=YMM_R():r:qq
+IFORM     : VMOVDQA_YMMqq_YMMqq_7F
+}
+
+
+{
+ICLASS    : VMOVDQU
+EXCEPTIONS: avx-type-4M
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+# LOAD XMM
+
+PATTERN : VV1 0x6F  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:dq
+
+PATTERN : VV1 0x6F  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq  REG1=XMM_B():r:dq
+IFORM     : VMOVDQU_XMMdq_XMMdq_6F
+
+# LOAD YMM
+
+PATTERN : VV1 0x6F  VL256 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq  MEM0:r:qq
+
+PATTERN : VV1 0x6F  VL256 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq  REG1=YMM_B():r:qq
+IFORM     : VMOVDQU_YMMqq_YMMqq_6F
+
+# STORE XMM
+
+PATTERN : VV1 0x7F  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq REG0=XMM_R():r:dq
+
+PATTERN : VV1 0x7F  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_B():w:dq REG1=XMM_R():r:dq
+IFORM     : VMOVDQU_XMMdq_XMMdq_7F
+
+# STORE YMM
+
+PATTERN : VV1 0x7F  VL256 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq REG0=YMM_R():r:qq
+
+PATTERN : VV1 0x7F  VL256 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_B():w:qq REG1=YMM_R():r:qq
+IFORM     : VMOVDQU_YMMqq_YMMqq_7F
+}
+
+#################################################
+## skipping to the end
+#################################################
+
+#################################################
+## MACROS
+#################################################
+{
+ICLASS    : VMOVSHDUP
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+PATTERN : VV1 0x16  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x16  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x16  VL256 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x16  VL256 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VMOVSLDUP
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+PATTERN : VV1 0x12  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x12  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x12  VL256 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x12  VL256 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_B():r:qq:f32
+}
+
+
+
+{
+ICLASS    : VPOR
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX
+PATTERN : VV1 0xEB  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 MEM0:r:dq:u128
+
+PATTERN : VV1 0xEB  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 REG2=XMM_B():r:dq:u128
+}
+{
+ICLASS    : VPAND
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX
+PATTERN : VV1 0xDB  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 MEM0:r:dq:u128
+
+PATTERN : VV1 0xDB  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 REG2=XMM_B():r:dq:u128
+}
+{
+ICLASS    : VPANDN
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX
+PATTERN : VV1 0xDF  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 MEM0:r:dq:u128
+
+PATTERN : VV1 0xDF  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 REG2=XMM_B():r:dq:u128
+}
+{
+ICLASS    : VPXOR
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX
+PATTERN : VV1 0xEF  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 MEM0:r:dq:u128
+
+PATTERN : VV1 0xEF  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u128 REG1=XMM_N():r:dq:u128 REG2=XMM_B():r:dq:u128
+}
+
+
+{
+ICLASS    : VPABSB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x1C   V66 V0F38 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 MEM0:r:dq:i8
+
+PATTERN : VV1 0x1C  V66 V0F38 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8  REG1=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPABSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x1D   V66 V0F38 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x1D  V66 V0F38 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16  REG1=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPABSD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x1E   V66 V0F38 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x1E  V66 V0F38 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32  REG1=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VPHMINPOSUW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x41   V66 V0F38 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0x41  V66 V0F38 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16  REG1=XMM_B():r:dq:u16
+}
+
+
+
+
+
+
+
+
+
+
+{
+ICLASS    : VPSHUFD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x70  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq MEM0:r:dq  IMM0:r:b
+
+PATTERN : VV1 0x70  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_B():r:dq IMM0:r:b
+}
+{
+ICLASS    : VPSHUFHW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x70  VL128 VF3 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq MEM0:r:dq  IMM0:r:b
+
+PATTERN : VV1 0x70  VL128 VF3 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_B():r:dq IMM0:r:b
+}
+{
+ICLASS    : VPSHUFLW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x70  VL128 VF2 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq MEM0:r:dq  IMM0:r:b
+
+PATTERN : VV1 0x70  VL128 VF2 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_B():r:dq IMM0:r:b
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+{
+ICLASS    : VPACKSSWB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x63  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x63  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPACKSSDW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x6B  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x6B  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+{
+ICLASS    : VPACKUSWB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x67  V66 V0F VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x67  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPACKUSDW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x2B  V66 V0F38 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x2B  V66 V0F38 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VPSLLW
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF1  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u64
+
+PATTERN : VV1 0xF1  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u64
+}
+{
+ICLASS    : VPSLLD
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF2  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u64
+
+PATTERN : VV1 0xF2  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u64
+}
+{
+ICLASS    : VPSLLQ
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF3  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0xF3  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+}
+
+{
+ICLASS    : VPSRLW
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD1  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u64
+
+PATTERN : VV1 0xD1  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u64
+}
+{
+ICLASS    : VPSRLD
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD2  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u64
+
+PATTERN : VV1 0xD2  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u64
+}
+{
+ICLASS    : VPSRLQ
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD3  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0xD3  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+}
+
+{
+ICLASS    : VPSRAW
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE1  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:u64
+
+PATTERN : VV1 0xE1  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:u64
+}
+{
+ICLASS    : VPSRAD
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE2  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:u64
+
+PATTERN : VV1 0xE2  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:u64
+}
+
+{
+ICLASS    : VPADDB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xFC  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0xFC  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPADDW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xFD  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xFD  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPADDD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xFE  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0xFE  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+{
+ICLASS    : VPADDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD4  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i64 REG1=XMM_N():r:dq:i64 MEM0:r:dq:i64
+
+PATTERN : VV1 0xD4  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i64 REG1=XMM_N():r:dq:i64 REG2=XMM_B():r:dq:i64
+}
+
+{
+ICLASS    : VPADDSB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xEC  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0xEC  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPADDSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xED  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xED  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+
+{
+ICLASS    : VPADDUSB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xDC  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0xDC  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPADDUSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xDD  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0xDD  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+
+{
+ICLASS    : VPAVGB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE0  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0xE0  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPAVGW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE3  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0xE3  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+
+{
+ICLASS    : VPCMPEQB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x74  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0x74  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPCMPEQW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x75  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0x75  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+{
+ICLASS    : VPCMPEQD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x76  V66 V0F VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u32
+
+PATTERN : VV1 0x76  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u32
+}
+{
+ICLASS    : VPCMPEQQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x29  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x29  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+}
+
+{
+ICLASS    : VPCMPGTB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x64  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0x64  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPCMPGTW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x65  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x65  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPCMPGTD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x66  V66 V0F VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x66  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+{
+ICLASS    : VPCMPGTQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x37  V66 V0F38 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i64 MEM0:r:dq:i64
+
+PATTERN : VV1 0x37  V66 V0F38 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i64 REG2=XMM_B():r:dq:i64
+}
+
+{
+ICLASS    : VPHADDW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x01  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x01  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPHADDD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x02  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x02  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+{
+ICLASS    : VPHADDSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x03  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x03  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPHSUBW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x05  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x05  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPHSUBD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x06  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x06  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+{
+ICLASS    : VPHSUBSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x07  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x07  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+
+{
+ICLASS    : VPMULHUW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE4  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0xE4  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+{
+ICLASS    : VPMULHRSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x0B  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x0B  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPMULHW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE5  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xE5  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPMULLW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD5  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xD5  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPMULLD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x40  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x40  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VPMULUDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF4  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u32
+
+PATTERN : VV1 0xF4  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u32
+}
+{
+ICLASS    : VPMULDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x28  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i64 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x28  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i64 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VPSADBW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF6  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0xF6  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPSHUFB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x00  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0x00  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+
+{
+ICLASS    : VPSIGNB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x08  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0x08  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPSIGNW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x09  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0x09  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPSIGND
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x0A  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x0A  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VPSUBSB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE8  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0xE8  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPSUBSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xE9  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xE9  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+
+{
+ICLASS    : VPSUBUSB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD8  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0xD8  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPSUBUSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD9  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0xD9  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+
+{
+ICLASS    : VPSUBB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF8  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0xF8  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPSUBW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF9  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xF9  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPSUBD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xFA  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0xFA  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+{
+ICLASS    : VPSUBQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xFB  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i64 REG1=XMM_N():r:dq:i64 MEM0:r:dq:i64
+
+PATTERN : VV1 0xFB  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i64 REG1=XMM_N():r:dq:i64 REG2=XMM_B():r:dq:i64
+}
+
+{
+ICLASS    : VPUNPCKHBW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x68  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0x68  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPUNPCKHWD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x69  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0x69  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+{
+ICLASS    : VPUNPCKHDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x6A  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u32
+
+PATTERN : VV1 0x6A  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u32
+}
+{
+ICLASS    : VPUNPCKHQDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x6D  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x6D  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+}
+
+{
+ICLASS    : VPUNPCKLBW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x60  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0x60  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPUNPCKLWD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x61  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0x61  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+{
+ICLASS    : VPUNPCKLDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x62  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u32
+
+PATTERN : VV1 0x62  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u32
+}
+{
+ICLASS    : VPUNPCKLQDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x6C  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x6C  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+}
+
+
+
+{
+ICLASS    : VPSRLDQ
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x73  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b011] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u128 REG1=XMM_B():r:dq:u128 IMM0:r:b   # NDD
+}
+{
+ICLASS    : VPSLLDQ
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x73  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b111] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u128 REG1=XMM_B():r:dq:u128 IMM0:r:b   # NDD
+}
+
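+# Annotation (not part of the upstream XED data): in entries marked "# NDD"
+# here and in the immediate-shift group further below, the destination is the
+# VEX.vvvv register (XMM_N) rather than ModRM.reg (a non-destructive
+# destination), while ModRM.rm (XMM_B) supplies the source.
+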
+{
+ICLASS    : VMOVLHPS
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+PATTERN : VV1 0x16  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:q:f32 REG2=XMM_B():r:q:f32
+}
+{
+ICLASS    : VMOVHLPS
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+PATTERN : VV1 0x12  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+}
+
+{
+ICLASS    : VPALIGNR
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x0F  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8 IMM0:r:b
+
+PATTERN : VV1 0x0F  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8 IMM0:r:b
+}
+{
+ICLASS    : VPBLENDW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x0E  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16 IMM0:r:b
+
+PATTERN : VV1 0x0E  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16 IMM0:r:b
+}
+
+############################################################
+{
+ICLASS    : VROUNDPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x09  VL128 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64  MEM0:r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x09  VL128 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_B():r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0x09  VL256 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64  MEM0:r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0x09  VL256 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_B():r:qq:f64 IMM0:r:b
+}
+{
+ICLASS    : VROUNDPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x08  VL128 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32  MEM0:r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x08  VL128 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_B():r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0x08  VL256 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32  MEM0:r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0x08  VL256 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_B():r:qq:f32 IMM0:r:b
+}
+{
+ICLASS    : VROUNDSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR simd_scalar
+PATTERN : VV1 0x0B  V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64  REG1=XMM_N():r:dq:f64  MEM0:r:q:f64         IMM0:r:b
+
+PATTERN : VV1 0x0B  V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64  REG1=XMM_N():r:dq:f64  REG2=XMM_B():r:q:f64 IMM0:r:b
+}
+{
+ICLASS    : VROUNDSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR simd_scalar
+PATTERN : VV1 0x0A  V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_N():r:dq:f32  MEM0:r:d:f32         IMM0:r:b
+
+PATTERN : VV1 0x0A  V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_N():r:dq:f32  REG2=XMM_B():r:d:f32 IMM0:r:b
+}
+
+{
+ICLASS    : VSHUFPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xC6  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0xC6  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 IMM0:r:b
+
+PATTERN : VV1 0xC6  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 IMM0:r:b
+
+PATTERN : VV1 0xC6  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 IMM0:r:b
+}
+{
+ICLASS    : VSHUFPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xC6  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0xC6  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 IMM0:r:b
+
+PATTERN : VV1 0xC6  VL256 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 IMM0:r:b
+
+PATTERN : VV1 0xC6  VL256 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 IMM0:r:b
+}
+
+{
+ICLASS    : VRCPPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x53  VNP VL128 NOVSR V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32  MEM0:r:dq:f32
+
+PATTERN : VV1 0x53  VNP VL128 NOVSR V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x53  VNP VL256 NOVSR V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32  MEM0:r:qq:f32
+
+PATTERN : VV1 0x53  VNP VL256 NOVSR V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32  REG1=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VRCPSS
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: simd_scalar
+PATTERN : VV1 0x53  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x53  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VRSQRTPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x52  VNP VL128 NOVSR V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32  MEM0:r:dq:f32
+
+PATTERN : VV1 0x52  VNP VL128 NOVSR V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x52  VNP VL256 NOVSR V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32  MEM0:r:qq:f32
+
+PATTERN : VV1 0x52  VNP VL256 NOVSR V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32  REG1=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VRSQRTSS
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: simd_scalar
+PATTERN : VV1 0x52  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x52  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VSQRTPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x51  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x51  VL128 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64  REG1=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x51  VL256 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64  MEM0:r:qq:f64
+
+PATTERN : VV1 0x51  VL256 V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64  REG1=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VSQRTPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x51  VL128 VNP NOVSR V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32  MEM0:r:dq:f32
+
+PATTERN : VV1 0x51  VL128 VNP NOVSR V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x51  VL256 VNP NOVSR V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32  MEM0:r:qq:f32
+
+PATTERN : VV1 0x51  VL256 VNP NOVSR V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32  REG1=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VSQRTSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : MXCSR simd_scalar
+PATTERN : VV1 0x51  VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x51  VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64
+}
+{
+ICLASS    : VSQRTSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR simd_scalar
+PATTERN : VV1 0x51  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x51  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+
+{
+ICLASS    : VUNPCKHPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x15  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x15  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x15  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x15  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VUNPCKHPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x15  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x15  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x15  VL256 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x15  VL256 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+
+
+{
+ICLASS    : VSUBPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x5C  V66 V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x5C  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x5C  V66 V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x5C  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VSUBPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x5C  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x5C  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x5C  VL256 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x5C  VL256 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VSUBSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : MXCSR SIMD_SCALAR
+PATTERN : VV1 0x5C  VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x5C  VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64
+}
+{
+ICLASS    : VSUBSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR simd_scalar
+PATTERN : VV1 0x5C  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x5C  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VMULPD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x59  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x59  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x59  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x59  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VMULPS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN : VV1 0x59  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x59  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x59  VL256 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x59  VL256 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VMULSD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : MXCSR simd_scalar
+PATTERN : VV1 0x59  VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:q:f64
+
+PATTERN : VV1 0x59  VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:q:f64
+}
+{
+ICLASS    : VMULSS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR simd_scalar
+PATTERN : VV1 0x59  VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:d:f32
+
+PATTERN : VV1 0x59  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VORPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x56  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x56  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+
+PATTERN : VV1 0x56  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x56  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=YMM_B():r:qq:u64
+}
+{
+ICLASS    : VORPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x56  VNP V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u32
+
+PATTERN : VV1 0x56  VNP V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u32
+
+PATTERN : VV1 0x56  VNP V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:qq:u32
+
+PATTERN : VV1 0x56  VNP V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=YMM_B():r:qq:u32
+}
+
+{
+ICLASS    : VPMAXSB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x3C  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0x3C  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPMAXSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xEE  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xEE  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPMAXSD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x3D  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x3D  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VPMAXUB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xDE  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0xDE  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPMAXUW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x3E  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0x3E  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+{
+ICLASS    : VPMAXUD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x3F  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u32
+
+PATTERN : VV1 0x3F  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u32
+}
+
+{
+ICLASS    : VPMINSB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x38  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8
+
+PATTERN : VV1 0x38  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8
+}
+{
+ICLASS    : VPMINSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xEA  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xEA  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPMINSD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x39  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 MEM0:r:dq:i32
+
+PATTERN : VV1 0x39  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i32 REG2=XMM_B():r:dq:i32
+}
+
+{
+ICLASS    : VPMINUB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xDA  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8
+
+PATTERN : VV1 0xDA  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8
+}
+{
+ICLASS    : VPMINUW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x3A  V66 V0F38 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 MEM0:r:dq:u16
+
+PATTERN : VV1 0x3A  V66 V0F38 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u16 REG2=XMM_B():r:dq:u16
+}
+{
+ICLASS    : VPMINUD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x3B  V66 V0F38 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 MEM0:r:dq:u32
+
+PATTERN : VV1 0x3B  V66 V0F38 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32 REG1=XMM_N():r:dq:u32 REG2=XMM_B():r:dq:u32
+}
+
+
+{
+ICLASS    : VPMADDWD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xF5  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i16 MEM0:r:dq:i16
+
+PATTERN : VV1 0xF5  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32 REG1=XMM_N():r:dq:i16 REG2=XMM_B():r:dq:i16
+}
+{
+ICLASS    : VPMADDUBSW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x04  VL128 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:u8 MEM0:r:dq:i8
+
+PATTERN : VV1 0x04  VL128 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:i8
+}
+
+
+{
+ICLASS    : VMPSADBW
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x42  VL128 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u8 MEM0:r:dq:u8 IMM0:r:b
+
+PATTERN : VV1 0x42  VL128 V66 V0F3A MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u16 REG1=XMM_N():r:dq:u8 REG2=XMM_B():r:dq:u8 IMM0:r:b
+}
+
+
+############################################################
+{
+ICLASS    : VPSLLW
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x71  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b110] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u16 REG1=XMM_B():r:dq:u16 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSLLD
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x72  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b110] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u32 REG1=XMM_B():r:dq:u32 IMM0:r:b  # NDD
+}
+{
+ICLASS    : VPSLLQ
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x73  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b110] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u64 REG1=XMM_B():r:dq:u64 IMM0:r:b # NDD
+}
+
+{
+ICLASS    : VPSRAW
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x71  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b100] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:i16 REG1=XMM_B():r:dq:i16 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRAD
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x72  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b100] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:i32 REG1=XMM_B():r:dq:i32 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRLW
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x71  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b010] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u16 REG1=XMM_B():r:dq:u16 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRLD
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x72  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b010] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u32 REG1=XMM_B():r:dq:u32 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRLQ
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x73  VL128 V66 V0F MOD[0b11] MOD=3 REG[0b010] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_N():w:dq:u64 REG1=XMM_B():r:dq:u64 IMM0:r:b  # NDD
+}
+
+
+{
+ICLASS    : VUCOMISD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+FLAGS     : MUST [ zf-mod pf-mod cf-mod of-0 af-0 sf-0 ]
+
+PATTERN : VV1 0x2E V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():r:dq:f64  MEM0:r:q:f64
+
+PATTERN : VV1 0x2E V66 V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:dq:f64  REG1=XMM_B():r:q:f64
+}
+
+{
+ICLASS    : VUCOMISS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar MXCSR
+
+FLAGS     : MUST [ zf-mod pf-mod cf-mod of-0 af-0 sf-0 ]
+
+PATTERN : VV1 0x2E VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():r:dq:f32  MEM0:r:d:f32
+
+PATTERN : VV1 0x2E VNP V0F NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:dq:f32  REG1=XMM_B():r:d:f32
+}
+
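+# Annotation (not part of the upstream XED data): in the FLAGS lines above,
+# MUST [ ... ] means the listed flags are always written; "zf-mod" marks ZF
+# as modified and "of-0" marks OF as cleared to 0, following XED flags
+# notation.
+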
+###############################################
+
+
+{
+ICLASS    : VUNPCKLPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x14  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+
+PATTERN : VV1 0x14  VL128 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+PATTERN : VV1 0x14  VL256 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+
+PATTERN : VV1 0x14  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+
+
+{
+ICLASS    : VUNPCKLPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x14  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+
+PATTERN : VV1 0x14  VL128 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+PATTERN : VV1 0x14  VL256 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+
+PATTERN : VV1 0x14  VL256 VNP V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+
+{
+ICLASS    : VXORPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x57  V66 V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0x57  V66 V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64 REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64
+
+PATTERN : VV1 0x57  V66 V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x57  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=YMM_B():r:qq:u64
+}
+
+
+{
+ICLASS    : VXORPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : LOGICAL_FP
+EXTENSION : AVX
+PATTERN : VV1 0x57  VNP V0F VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x57  VNP V0F VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x57  VNP V0F VL256 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x57  VNP V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+}
+
+
+############################################################################
+
+{
+ICLASS    : VMOVSS
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar
+
+# NOTE: REG1 is ignored!!!
+PATTERN : VV1 0x10  VF3 V0F MOD[mm] MOD!=3  NOVSR REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32  MEM0:r:d:f32
+
+PATTERN   : VV1 0x10  VF3 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_N():r:dq:f32    REG2=XMM_B():r:d:f32
+IFORM     : VMOVSS_XMMdq_XMMdq_XMMd_10
+
+PATTERN : VV1 0x11  VF3 V0F  MOD[mm] MOD!=3 NOVSR  REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:d:f32          REG0=XMM_R():r:d:f32
+
+PATTERN : VV1 0x11  VF3 V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_B():w:dq:f32   REG1=XMM_N():r:dq:f32   REG2=XMM_R():r:d:f32
+IFORM     : VMOVSS_XMMdq_XMMdq_XMMd_11
+}
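+# Annotation (not part of the upstream XED data): an explicit IFORM line
+# assigns a unique name to one encoding form of an ICLASS; here it separates
+# the 0x10 (load-direction) and 0x11 (store-direction) encodings of VMOVSS.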
+############################################################################
+{
+ICLASS    : VMOVSD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES : simd_scalar
+
+# NOTE: REG1 is ignored!!!
+PATTERN : VV1 0x10  VF2 V0F MOD[mm] MOD!=3  NOVSR REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64   MEM0:r:q:f64
+
+PATTERN : VV1 0x10  VF2 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64  REG1=XMM_N():r:dq:f64    REG2=XMM_B():r:q:f64
+IFORM     : VMOVSD_XMMdq_XMMdq_XMMq_10
+
+PATTERN : VV1 0x11  VF2 V0F MOD[mm] MOD!=3 NOVSR REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:q:f64           REG0=XMM_R():r:q:f64
+
+PATTERN : VV1 0x11  VF2 V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_B():w:dq:f64   REG1=XMM_N():r:dq:f64  REG2=XMM_R():r:q:f64
+IFORM     : VMOVSD_XMMdq_XMMdq_XMMq_11
+}
+############################################################################
+{
+ICLASS    : VMOVUPD
+EXCEPTIONS: avx-type-4M
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+PATTERN : VV1 0x10  V66 VL128 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64   MEM0:r:dq:f64
+
+PATTERN : VV1 0x10  V66 VL128 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f64   REG1=XMM_B():r:dq:f64
+IFORM     : VMOVUPD_XMMdq_XMMdq_10
+
+PATTERN : VV1 0x11  V66 VL128 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq:f64           REG0=XMM_R():r:dq:f64
+
+PATTERN : VV1 0x11  V66 VL128 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_B():w:dq:f64   REG1=XMM_R():r:dq:f64
+IFORM     : VMOVUPD_XMMdq_XMMdq_11
+
+# 256b versions
+
+PATTERN : VV1 0x10  V66 VL256 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f64      MEM0:r:qq:f64
+
+PATTERN : VV1 0x10  V66 VL256 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64      REG1=YMM_B():r:qq:f64
+IFORM     : VMOVUPD_YMMqq_YMMqq_10
+
+PATTERN : VV1 0x11  V66 VL256 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq:f64              REG0=YMM_R():r:qq:f64
+
+PATTERN : VV1 0x11  V66 VL256 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_B():w:qq:f64      REG1=YMM_R():r:qq:f64
+IFORM     : VMOVUPD_YMMqq_YMMqq_11
+}
+
+############################################################################
+{
+ICLASS    : VMOVUPS
+EXCEPTIONS: avx-type-4M
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+PATTERN : VV1 0x10  VNP VL128 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32   MEM0:r:dq:f32
+
+PATTERN : VV1 0x10  VNP VL128 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32   REG1=XMM_B():r:dq:f32
+IFORM     : VMOVUPS_XMMdq_XMMdq_10
+
+PATTERN : VV1 0x11  VNP VL128 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:dq:f32           REG0=XMM_R():r:dq:f32
+
+PATTERN : VV1 0x11  VNP VL128 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_B():w:dq:f32   REG1=XMM_R():r:dq:f32
+IFORM     : VMOVUPS_XMMdq_XMMdq_11
+
+# 256b versions
+
+PATTERN : VV1 0x10  VNP VL256 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:f32      MEM0:r:qq:f32
+
+PATTERN : VV1 0x10  VNP VL256 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32      REG1=YMM_B():r:qq:f32
+IFORM     : VMOVUPS_YMMqq_YMMqq_10
+
+PATTERN : VV1 0x11  VNP VL256 V0F NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:qq:f32              REG0=YMM_R():r:qq:f32
+
+PATTERN : VV1 0x11  VNP VL256 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_B():w:qq:f32      REG1=YMM_R():r:qq:f32
+IFORM     : VMOVUPS_YMMqq_YMMqq_11
+}
+
+
+############################################################################
+{
+ICLASS    : VMOVLPD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+COMMENT: 3op version uses high part of XMM_N
+PATTERN : VV1 0x12  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64   REG1=XMM_N():r:dq:f64   MEM0:r:q:f64
+
+PATTERN : VV1 0x13  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:q:f64            REG0=XMM_R():r:q:f64
+}
+
+{
+ICLASS    : VMOVLPS
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+COMMENT: 3op version uses high part of XMM_N
+PATTERN : VV1 0x12  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32   REG1=XMM_N():r:dq:f32   MEM0:r:q:f32
+
+PATTERN : VV1 0x13  VL128 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:q:f32            REG0=XMM_R():r:q:f32
+}
+
+{
+ICLASS    : VMOVHPD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+COMMENT:  3op form uses low bits of REG1, 2op form uses high bits of REG0
+PATTERN : VV1 0x16  VL128 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f64   REG1=XMM_N():r:q:f64   MEM0:r:q:f64
+
+PATTERN : VV1 0x17  VL128 V66 V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:q:f64            REG0=XMM_R():r:dq:f64
+}
+
+{
+ICLASS    : VMOVHPS
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+
+COMMENT:  3op form uses low bits of REG1, 2op form uses high bits of REG0
+PATTERN : VV1 0x16  VL128 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:f32   REG1=XMM_N():r:q:f32   MEM0:r:q:f32
+
+PATTERN : VV1 0x17  VL128 VNP V0F NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : MEM0:w:q:f32            REG0=XMM_R():r:dq:f32
+}
+############################################################################
+
+{
+ICLASS    : VMOVMSKPD
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+PATTERN : VV1 0x50  VL128 V66 V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d   REG1=XMM_B():r:dq:f64
+
+# 256b versions
+
+PATTERN : VV1 0x50  VL256 V66 V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d   REG1=YMM_B():r:qq:f64
+}
+
+{
+ICLASS    : VMOVMSKPS
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+PATTERN : VV1 0x50  VL128 VNP V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d   REG1=XMM_B():r:dq:f32
+
+# 256b versions
+
+PATTERN : VV1 0x50  VL256 VNP V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d   REG1=YMM_B():r:qq:f32
+}
+
+############################################################################
+{
+ICLASS    : VPMOVMSKB
+EXCEPTIONS: avx-type-7
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0xD7  VL128 V66 V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:u32   REG1=XMM_B():r:dq:i8
+}
+
+############################################################################
+
+############################################################################
+# SX versions
+############################################################################
+
+{
+ICLASS    : VPMOVSXBW
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x20  VL128 V66 V0F38 NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i16   REG1=XMM_B():r:q:i8
+PATTERN : VV1 0x20  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i16  MEM0:r:q:i8
+}
+
+############################################################################
+{
+ICLASS    : VPMOVSXBD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x21  VL128 V66 V0F38 NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32   REG1=XMM_B():r:d:i8
+PATTERN : VV1 0x21  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32   MEM0:r:d:i8
+}
+############################################################################
+{
+ICLASS    : VPMOVSXBQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x22  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i64   REG1=XMM_B():r:w:i8
+PATTERN : VV1 0x22  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i64   MEM0:r:w:i8
+}
+############################################################################
+{
+ICLASS    : VPMOVSXWD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x23  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i32   REG1=XMM_B():r:q:i16
+PATTERN : VV1 0x23  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i32   MEM0:r:q:i16
+}
+############################################################################
+{
+ICLASS    : VPMOVSXWQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x24  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i64   REG1=XMM_B():r:d:i16
+PATTERN : VV1 0x24  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i64   MEM0:r:d:i16
+}
+############################################################################
+{
+ICLASS    : VPMOVSXDQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x25  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:i64   REG1=XMM_B():r:q:i32
+PATTERN : VV1 0x25  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:i64   MEM0:r:q:i32
+}
+
+############################################################################
+# ZX versions
+############################################################################
+
+{
+ICLASS    : VPMOVZXBW
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x30  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16   REG1=XMM_B():r:q:u8
+PATTERN : VV1 0x30  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16   MEM0:r:q:u8
+}
+
+############################################################################
+{
+ICLASS    : VPMOVZXBD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x31  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32   REG1=XMM_B():r:d:u8
+PATTERN : VV1 0x31  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32   MEM0:r:d:u8
+}
+############################################################################
+{
+ICLASS    : VPMOVZXBQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x32  V66 V0F38 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64   REG1=XMM_B():r:w:u8
+PATTERN : VV1 0x32  V66 V0F38 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64   MEM0:r:w:u8
+}
+############################################################################
+{
+ICLASS    : VPMOVZXWD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x33  V66 V0F38 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32   REG1=XMM_B():r:q:u16
+PATTERN : VV1 0x33  V66 V0F38 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32   MEM0:r:q:u16
+}
+############################################################################
+{
+ICLASS    : VPMOVZXWQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x34  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64   REG1=XMM_B():r:d:u16
+PATTERN : VV1 0x34  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64   MEM0:r:d:u16
+}
+############################################################################
+{
+ICLASS    : VPMOVZXDQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x35  VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64   REG1=XMM_B():r:q:u32
+PATTERN : VV1 0x35  VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64   MEM0:r:q:u32
+}
+
+
+
+############################################################################
+############################################################################
+{
+ICLASS    : VPEXTRB
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+COMMENT: WIG
+PATTERN : VV1 0x14  VL128 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:b           REG0=XMM_R():r:dq:u8 IMM0:r:b
+
+PATTERN : VV1 0x14  VL128 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=GPR32_B():w:d REG1=XMM_R():r:dq:u8 IMM0:r:b
+}
+############################################################################
+{
+ICLASS    : VPEXTRW
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+COMMENT: WIG
+
+PATTERN : VV1 0x15  VL128 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:w           REG0=XMM_R():r:dq:u16 IMM0:r:b
+
+PATTERN : VV1 0x15  VL128 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=GPR32_B():w:d REG1=XMM_R():r:dq:u16 IMM0:r:b
+IFORM     : VPEXTRW_GPR32d_XMMdq_IMMb_15
+
+# special C5 reg-only versions from SSE2:
+
+PATTERN   : VV1 0xC5  VL128 V66 V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=GPR32_R():w:d    REG1=XMM_B():r:dq:u16 IMM0:r:b
+IFORM     : VPEXTRW_GPR32d_XMMdq_IMMb_C5
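+# Note: the 0x15 form above encodes the GPR destination in MODRM.RM
+# (GPR32_B) and the XMM source in MODRM.REG (XMM_R); the legacy-style
+# 0xC5 form swaps the two fields (GPR32_R / XMM_B).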
+}
+############################################################################
+{
+ICLASS    : VPEXTRQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x16  VL128 V66 V0F3A mode64 rexw_prefix  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:q              REG0=XMM_R():r:dq:u64 IMM0:r:b
+PATTERN : VV1 0x16  VL128 V66 V0F3A mode64 rexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=GPR64_B():w:q    REG1=XMM_R():r:dq:u64 IMM0:r:b
+}
+############################################################################
+{
+ICLASS    : VPEXTRD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+COMMENT   : SNB had an erratum where it would #UD if VEX.W=1 outside of 64b mode. Not modeled.
+
+# 64b mode
+PATTERN   : VV1 0x16 VL128 V66 V0F3A mode64 norexw_prefix NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:d REG0=XMM_R():r:dq:u32 IMM0:r:b
+PATTERN   : VV1 0x16 VL128 V66 V0F3A mode64 norexw_prefix NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=GPR32_B():w:d REG1=XMM_R():r:dq:u32 IMM0:r:b
+
+# not64b mode
+PATTERN   : VV1 0x16 VL128 V66 V0F3A not64  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:d REG0=XMM_R():r:dq:u32 IMM0:r:b
+PATTERN   : VV1 0x16 VL128 V66 V0F3A not64  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=GPR32_B():w:d REG1=XMM_R():r:dq:u32 IMM0:r:b
+
+}
+############################################################################
+
+{
+ICLASS    : VPINSRB
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+COMMENT: WIG
+PATTERN : VV1 0x20  VL128 V66 V0F3A  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u8     REG1=XMM_N():r:dq:u8  MEM0:r:b:u8            IMM0:r:b
+PATTERN : VV1 0x20  VL128 V66 V0F3A  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u8     REG1=XMM_N():r:dq:u8  REG2=GPR32_B():r:d:u8  IMM0:r:b
+}
+
+{
+ICLASS    : VPINSRW
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+COMMENT : WIG
+PATTERN : VV1 0xC4  VL128 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u16     REG1=XMM_N():r:dq:u16  MEM0:r:w:u16           IMM0:r:b
+
+PATTERN : VV1 0xC4  VL128 V66 V0F  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u16     REG1=XMM_N():r:dq:u16  REG2=GPR32_B():r:d:u16  IMM0:r:b
+}
+
+{
+ICLASS    : VPINSRD
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+COMMENT   : SNB had an erratum where it would #UD if VEX.W=1 outside of 64b mode. Not modeled.
+# 64b mode
+PATTERN : VV1 0x22  VL128 V66 V0F3A mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u32     REG1=XMM_N():r:dq:u32  MEM0:r:d:u32            IMM0:r:b
+PATTERN : VV1 0x22  VL128 V66 V0F3A mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u32     REG1=XMM_N():r:dq:u32  REG2=GPR32_B():r:d:u32  IMM0:r:b
+
+# not64b mode
+PATTERN : VV1 0x22  VL128 V66 V0F3A not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u32     REG1=XMM_N():r:dq:u32  MEM0:r:d:u32            IMM0:r:b
+PATTERN : VV1 0x22  VL128 V66 V0F3A not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u32     REG1=XMM_N():r:dq:u32  REG2=GPR32_B():r:d:u32  IMM0:r:b
+}
+{
+ICLASS    : VPINSRQ
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x22  VL128 V66 V0F3A mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u64     REG1=XMM_N():r:dq:u64  MEM0:r:q:u64            IMM0:r:b
+PATTERN : VV1 0x22  VL128 V66 V0F3A mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u64     REG1=XMM_N():r:dq:u64  REG2=GPR64_B():r:q:u64  IMM0:r:b
+}
+
+############################################################################
+
+{
+ICLASS    : VPCMPESTRI
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : STTNI
+EXTENSION : AVX
+FLAGS     : MUST [ cf-mod zf-mod sf-mod of-mod af-0 pf-0 ]
+
+# outside of 64b mode, vex.w is ignored for this instr
+PATTERN : VV1 0x61  VL128 V66 V0F3A NOVSR not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_EDX:r:SUPP REG3=XED_REG_ECX:w:SUPP
+PATTERN : VV1 0x61  VL128 V66 V0F3A NOVSR not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_EAX:r:SUPP REG3=XED_REG_EDX:r:SUPP REG4=XED_REG_ECX:w:SUPP
+
+# in 64b mode, vex.w changes the behavior for GPRs
+PATTERN : VV1 0x61  VL128 V66 V0F3A NOVSR mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_EDX:r:SUPP REG3=XED_REG_ECX:w:SUPP
+PATTERN : VV1 0x61  VL128 V66 V0F3A NOVSR mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_EAX:r:SUPP REG3=XED_REG_EDX:r:SUPP REG4=XED_REG_ECX:w:SUPP
+
+PATTERN : VV1 0x61  VL128 V66 V0F3A NOVSR mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_RAX:r:SUPP REG2=XED_REG_RDX:r:SUPP REG3=XED_REG_RCX:w:SUPP
+PATTERN : VV1 0x61  VL128 V66 V0F3A NOVSR mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_RAX:r:SUPP REG3=XED_REG_RDX:r:SUPP REG4=XED_REG_RCX:w:SUPP
+}
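+# Note: the SUPP operands above spell out the VEX.W dependence -- with W=1
+# in 64b mode the implicit length inputs are RAX/RDX and the result index
+# is written to RCX; every other combination uses EAX/EDX/ECX.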
+{
+ICLASS    : VPCMPISTRI
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : STTNI
+EXTENSION : AVX
+FLAGS     : MUST [ cf-mod zf-mod sf-mod of-mod af-0 pf-0 ]
+
+# outside of 64b mode, vex.w is ignored for this instr
+PATTERN : VV1 0x63  VL128 V66 V0F3A NOVSR  not64  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_ECX:w:SUPP
+PATTERN : VV1 0x63  VL128 V66 V0F3A NOVSR  not64  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_ECX:w:SUPP
+
+# in 64b mode, vex.w changes the behavior for GPRs
+PATTERN : VV1 0x63  VL128 V66 V0F3A NOVSR mode64 norexw_prefix  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_ECX:w:SUPP
+PATTERN : VV1 0x63  VL128 V66 V0F3A NOVSR mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_ECX:w:SUPP
+
+PATTERN : VV1 0x63  VL128 V66 V0F3A NOVSR mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_RCX:w:SUPP
+PATTERN : VV1 0x63  VL128 V66 V0F3A NOVSR mode64 rexw_prefix  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_RCX:w:SUPP
+}
+
+{
+ICLASS    : VPCMPESTRM
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : STTNI
+EXTENSION : AVX
+FLAGS     : MUST [ cf-mod zf-mod sf-mod of-mod af-0 pf-0 ]
+
+# outside of 64b mode, vex.w is ignored for this instr
+PATTERN : VV1 0x60  VL128 V66 V0F3A NOVSR not64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_EDX:r:SUPP REG3=XED_REG_XMM0:w:dq:SUPP
+PATTERN : VV1 0x60  VL128 V66 V0F3A NOVSR not64 MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_EAX:r:SUPP REG3=XED_REG_EDX:r:SUPP REG4=XED_REG_XMM0:w:dq:SUPP
+
+# in 64b mode, vex.w changes the behavior for GPRs
+PATTERN : VV1 0x60  VL128 V66 V0F3A NOVSR mode64 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_EDX:r:SUPP REG3=XED_REG_XMM0:w:dq:SUPP
+PATTERN : VV1 0x60  VL128 V66 V0F3A NOVSR mode64 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_EAX:r:SUPP REG3=XED_REG_EDX:r:SUPP REG4=XED_REG_XMM0:w:dq:SUPP
+
+PATTERN : VV1 0x60  VL128 V66 V0F3A NOVSR mode64 rexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_RAX:r:SUPP REG2=XED_REG_RDX:r:SUPP REG3=XED_REG_XMM0:w:dq:SUPP
+PATTERN : VV1 0x60  VL128 V66 V0F3A NOVSR mode64 rexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_RAX:r:SUPP REG3=XED_REG_RDX:r:SUPP REG4=XED_REG_XMM0:w:dq:SUPP
+}
+
+{
+ICLASS    : VPCMPISTRM
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : STTNI
+EXTENSION : AVX
+FLAGS     : MUST [ cf-mod zf-mod sf-mod of-mod af-0 pf-0 ]
+PATTERN : VV1 0x62  VL128 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     MEM0:r:dq         IMM0:r:b REG1=XED_REG_XMM0:w:dq:SUPP
+PATTERN : VV1 0x62  VL128 V66 V0F3A NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():r:dq     REG1=XMM_B():r:dq IMM0:r:b REG2=XED_REG_XMM0:w:dq:SUPP
+}
+####################################################################################
+
+
+
+####################################################################################
+{
+ICLASS    : VMASKMOVDQU
+EXCEPTIONS: avx-type-4
+CPL       : 3
+
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES : maskop fixed_base0 NOTSX NONTEMPORAL
+PATTERN : VV1 0xF7 V0F V66 VL128  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():r:dq:u8 REG1=XMM_B():r:dq:u8 MEM0:w:SUPP:dq:u8 BASE0=ArDI():r:SUPP SEG0=FINAL_DSEG():r:SUPP
+}
+
+####################################################################################
+{
+ICLASS    : VLDMXCSR
+EXCEPTIONS: avx-type-5L
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR
+PATTERN   : VV1 0xAE VL128 VNP V0F NOVSR MOD[mm] MOD!=3 REG[0b010] RM[nnn] no_refining_prefix MODRM()
+OPERANDS  : MEM0:r:d REG0=XED_REG_MXCSR:w:SUPP
+}
+{
+ICLASS    : VSTMXCSR
+EXCEPTIONS: avx-type-5
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+ATTRIBUTES: MXCSR_RD
+PATTERN   : VV1 0xAE VL128 VNP V0F NOVSR MOD[mm] MOD!=3 REG[0b011] RM[nnn] no_refining_prefix MODRM()
+OPERANDS  : MEM0:w:d REG0=XED_REG_MXCSR:r:SUPP
+}
+#######################################################################################
+
+{
+ICLASS    : VPBLENDVB
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+
+# W0 (modrm.rm memory op 2nd to last)
+PATTERN : VV1 0x4C   VL128 V66 V0F3A norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 MEM0:r:dq:i8 REG2=XMM_SE():r:dq:i8
+
+PATTERN : VV1 0x4C   VL128 V66 V0F3A norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:i8 REG1=XMM_N():r:dq:i8 REG2=XMM_B():r:dq:i8 REG3=XMM_SE():r:dq:i8
+}
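+# Note: XMM_SE() is the /is4 selector operand -- SE_IMM8() consumes a
+# trailing immediate byte and the selector register number is taken from
+# its upper four bits (imm8[7:4]).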
+
+{
+ICLASS    : VBLENDVPD
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+
+# W0 (modrm.rm memory op 2nd to last)
+PATTERN : VV1 0x4B   V66 V0F3A VL128 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64 REG2=XMM_SE():r:dq:u64
+
+PATTERN : VV1 0x4B   V66 V0F3A VL128 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64 REG3=XMM_SE():r:dq:u64
+
+PATTERN : VV1 0x4B   V66 V0F3A VL256 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64 REG2=YMM_SE():r:qq:u64
+
+PATTERN : VV1 0x4B   V66 V0F3A VL256 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64 REG3=YMM_SE():r:qq:u64
+
+}
+
+{
+ICLASS    : VBLENDVPS
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+
+# W0 (modrm.rm memory op 2nd to last)
+PATTERN : VV1 0x4A   V66 V0F3A VL128 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32 REG2=XMM_SE():r:dq:u32
+
+PATTERN : VV1 0x4A   V66 V0F3A VL128 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=XMM_R():w:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32 REG3=XMM_SE():r:dq:u32
+
+PATTERN : VV1 0x4A   V66 V0F3A VL256 norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32 REG2=YMM_SE():r:qq:u32
+
+PATTERN : VV1 0x4A   V66 V0F3A VL256 norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32 REG3=YMM_SE():r:qq:u32
+
+
+}
+
+#######################################################################################
+
+
+
+{
+ICLASS    : VMOVNTDQA
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX  NONTEMPORAL
+
+PATTERN : VV1 0x2A  V66 V0F38 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq MEM0:r:dq
+}
+
+{
+ICLASS    : VMOVNTDQ
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX NONTEMPORAL
+PATTERN : VV1 0xE7  V66 V0F VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:dq:i32  REG0=XMM_R():r:dq:i32
+
+}
+{
+ICLASS    : VMOVNTPD
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX NONTEMPORAL
+PATTERN : VV1 0x2B  V66 V0F VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:dq:f64  REG0=XMM_R():r:dq:f64
+
+}
+{
+ICLASS    : VMOVNTPS
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX NONTEMPORAL
+PATTERN : VV1 0x2B  VNP V0F VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:dq:f32  REG0=XMM_R():r:dq:f32
+
+}
+
+
+
+###FILE: ./datafiles/avx/avx-movnt-store.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+
+
+{
+ICLASS    : VMOVNTDQ
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX NONTEMPORAL
+PATTERN : VV1 0xE7  V66 V0F VL256 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:qq:i32  REG0=YMM_R():r:qq:i32
+
+}
+{
+ICLASS    : VMOVNTPD
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX NONTEMPORAL
+PATTERN : VV1 0x2B  V66 V0F VL256 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:qq:f64  REG0=YMM_R():r:qq:f64
+
+}
+{
+ICLASS    : VMOVNTPS
+EXCEPTIONS: avx-type-1
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX NONTEMPORAL
+PATTERN : VV1 0x2B  VNP V0F VL256 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:qq:f32  REG0=YMM_R():r:qq:f32
+
+}
+
+
+
+###FILE: ./datafiles/avx/avx-aes-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+
+{
+ICLASS    : VAESKEYGENASSIST
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AES
+EXTENSION : AVXAES
+PATTERN : VV1 0xDF VL128 V66 V0F3A  NOVSR MOD[0b11] MOD=3  REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq  REG1=XMM_B():r:dq IMM0:r:b
+PATTERN : VV1 0xDF  VL128 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:dq IMM0:r:b
+}
+{
+ICLASS    : VAESENC
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AES
+EXTENSION : AVXAES
+PATTERN : VV1 0xDC V66 V0F38  MOD[0b11] MOD=3  REG[rrr] RM[nnn] VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  REG2=XMM_B():r:dq
+PATTERN : VV1 0xDC V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM() VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  MEM0:r:dq
+}
+{
+ICLASS    : VAESENCLAST
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AES
+EXTENSION : AVXAES
+PATTERN : VV1 0xDD V66 V0F38 MOD[0b11] MOD=3  REG[rrr] RM[nnn] VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  REG2=XMM_B():r:dq
+PATTERN : VV1 0xDD  V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  MEM0:r:dq
+}
+{
+ICLASS    : VAESDEC
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AES
+EXTENSION : AVXAES
+PATTERN : VV1 0xDE V66 V0F38 MOD[0b11] MOD=3  REG[rrr] RM[nnn] VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  REG2=XMM_B():r:dq
+PATTERN : VV1 0xDE V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  MEM0:r:dq
+}
+{
+ICLASS    : VAESDECLAST
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AES
+EXTENSION : AVXAES
+PATTERN : VV1 0xDF V66 V0F38 MOD[0b11] MOD=3  REG[rrr] RM[nnn] VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  REG2=XMM_B():r:dq
+PATTERN : VV1 0xDF V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() VL128
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq  MEM0:r:dq
+}
+{
+ICLASS    : VAESIMC
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AES
+EXTENSION : AVXAES
+PATTERN : VV1 0xDB VL128 V66 V0F38 NOVSR MOD[0b11] MOD=3  REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq  REG1=XMM_B():r:dq
+PATTERN : VV1 0xDB VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq  MEM0:r:dq
+}
+
+
+
+###FILE: ./datafiles/avx/avx-pclmul-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+{
+ICLASS    : VPCLMULQDQ
+EXCEPTIONS: avx-type-4
+CPL       : 3
+CATEGORY  : AVX
+EXTENSION : AVX
+PATTERN : VV1 0x44  V66 V0F3A  MOD[0b11]  MOD=3  REG[rrr] RM[nnn] VL128 UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u128  REG1=XMM_N():r:dq:u64 REG2=XMM_B():r:dq:u64 IMM0:r:b
+PATTERN : VV1 0x44  V66 V0F3A  MOD[mm]  MOD!=3 REG[rrr] RM[nnn] MODRM() VL128 UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u128  REG1=XMM_N():r:dq:u64 MEM0:r:dq:u64 IMM0:r:b
+}
+
+
+###FILE: ./datafiles/ivbavx/fp16-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+{
+ICLASS    : VCVTPH2PS
+COMMENT   : UPCONVERT -- NO IMMEDIATE
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : F16C
+ATTRIBUTES : MXCSR
+EXCEPTIONS: avx-type-11
+# 128b form
+
+PATTERN : VV1 0x13 VL128 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()  W0
+OPERANDS  : REG0=XMM_R():w:dq:f32 MEM0:r:q:f16
+
+PATTERN : VV1 0x13 VL128 V66 V0F38 NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn] W0
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_B():r:q:f16
+
+
+# 256b form
+
+PATTERN : VV1 0x13 VL256 V66 V0F38 NOVSR  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() W0
+OPERANDS  : REG0=YMM_R():w:qq:f32 MEM0:r:dq:f16
+
+PATTERN : VV1 0x13 VL256 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]  W0
+OPERANDS  : REG0=YMM_R():w:qq:f32  REG1=XMM_B():r:dq:f16
+}
+
+
+{
+ICLASS    : VCVTPS2PH
+COMMENT   : DOWNCONVERT -- HAS IMMEDIATE
+CPL       : 3
+CATEGORY  : CONVERT
+EXTENSION : F16C
+ATTRIBUTES : MXCSR
+EXCEPTIONS: avx-type-11
+# 128b imm8 form
+
+PATTERN : VV1 0x1D VL128 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8() W0
+OPERANDS  : MEM0:w:q:f16 REG0=XMM_R():r:dq:f32  IMM0:r:b
+
+PATTERN : VV1 0x1D VL128 V66 V0F3A NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8() W0
+OPERANDS  : REG0=XMM_B():w:q:f16 REG1=XMM_R():r:dq:f32   IMM0:r:b
+
+# 256b imm8 form
+
+PATTERN : VV1 0x1D VL256 V66 V0F3A NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8() W0
+OPERANDS  : MEM0:w:dq:f16 REG0=YMM_R():r:qq:f32  IMM0:r:b
+
+PATTERN : VV1 0x1D VL256 V66 V0F3A NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8() W0
+OPERANDS  : REG0=XMM_B():w:dq:f16 REG1=YMM_R():r:qq:f32    IMM0:r:b
+
+}
+
+
+
+###FILE: ./datafiles/avxhsw/gather-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+
+
+# DEST in MODRM.REG
+# BASE in SIB.base
+# INDEX in SIB.index
+# MASK in VEX.VVVV   --  NOTE mask is a signed integer!!!
+
+#                    VL = 128                        VL = 256
+#            dest/mask   index  memsz        dest/mask   index   memsz
+# qps/qd      xmm       xmm      2*32=64b      xmm*       ymm*    4*32=128b
+# dps/dd      xmm       xmm      4*32=128b     ymm        ymm     8*32=256b
+# dpd/dq      xmm       xmm      2*64=128b     ymm*       xmm*    4*64=256b
+# qpd/qq      xmm       xmm      2*64=128b     ymm        ymm     4*64=256b
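+#
+# Example reading of the chart: VGATHERDPD ymm1, [rax+xmm2*8], ymm3 is the
+# dpd/dq row at VL=256: dest/mask are YMM, the index register is an XMM
+# holding four dword indices, and up to 4*64=256b of memory is accessed;
+# ymm1 comes from MODRM.REG, rax from SIB.base, xmm2 from SIB.index, and
+# ymm3 from VEX.VVVV.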
+
+
+
+{
+ICLASS    : VGATHERDPD
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather DWORD_INDICES ELEMENT_SIZE_q SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x92   VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=YMM_R():crw:qq:f64   MEM0:r:q:f64 REG1=YMM_N():rw:qq:i64
+IFORM: VGATHERDPD_YMMf64_MEMf64_YMMi64_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x92   VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:f64   MEM0:r:q:f64 REG1=XMM_N():rw:dq:i64
+IFORM: VGATHERDPD_XMMf64_MEMf64_XMMi64_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+{
+ICLASS    : VGATHERDPS
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather DWORD_INDICES ELEMENT_SIZE_d SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x92   VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_YMM() eanot16
+OPERANDS  : REG0=YMM_R():crw:qq:f32   MEM0:r:d:f32 REG1=YMM_N():rw:qq:i32
+IFORM: VGATHERDPS_YMMf32_MEMf32_YMMi32_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x92   VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:f32   MEM0:r:d:f32 REG1=XMM_N():rw:dq:i32
+IFORM: VGATHERDPS_XMMf32_MEMf32_XMMi32_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+{
+ICLASS    : VGATHERQPD
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather QWORD_INDICES ELEMENT_SIZE_q SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x93   VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_YMM() eanot16
+OPERANDS  : REG0=YMM_R():crw:qq:f64   MEM0:r:q:f64 REG1=YMM_N():rw:qq:i64
+IFORM: VGATHERQPD_YMMf64_MEMf64_YMMi64_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x93   VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:f64   MEM0:r:q:f64 REG1=XMM_N():rw:dq:i64
+IFORM: VGATHERQPD_XMMf64_MEMf64_XMMi64_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+{
+ICLASS    : VGATHERQPS
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather QWORD_INDICES ELEMENT_SIZE_d SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x93   VL256 V66 V0F38   W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_YMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:f32   MEM0:r:d:f32 REG1=XMM_N():rw:dq:i32
+IFORM: VGATHERQPS_XMMf32_MEMf32_XMMi32_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x93   VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:q:f32   MEM0:r:d:f32 REG1=XMM_N():rw:q:i32
+IFORM: VGATHERQPS_XMMf32_MEMf32_XMMi32_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+
+{
+ICLASS    : VPGATHERDQ
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather DWORD_INDICES ELEMENT_SIZE_q SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x90   VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=YMM_R():crw:qq:u64   MEM0:r:q:u64 REG1=YMM_N():rw:qq:i64
+IFORM: VPGATHERDQ_YMMu64_MEMq_YMMi64_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x90   VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:u64   MEM0:r:q:u64 REG1=XMM_N():rw:dq:i64
+IFORM: VPGATHERDQ_XMMu64_MEMq_XMMi64_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+{
+ICLASS    : VPGATHERDD
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather DWORD_INDICES ELEMENT_SIZE_d SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x90   VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_YMM() eanot16
+OPERANDS  : REG0=YMM_R():crw:qq:u32   MEM0:r:d:u32 REG1=YMM_N():rw:qq:i32
+IFORM: VPGATHERDD_YMMu32_MEMd_YMMi32_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x90   VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:u32   MEM0:r:d:u32 REG1=XMM_N():rw:dq:i32
+IFORM: VPGATHERDD_XMMu32_MEMd_XMMi32_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+{
+ICLASS    : VPGATHERQQ
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather QWORD_INDICES ELEMENT_SIZE_q SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x91   VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_YMM() eanot16
+OPERANDS  : REG0=YMM_R():crw:qq:u64   MEM0:r:q:u64 REG1=YMM_N():rw:qq:i64
+IFORM: VPGATHERQQ_YMMu64_MEMq_YMMi64_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x91   VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:u64   MEM0:r:q:u64 REG1=XMM_N():rw:dq:i64
+IFORM: VPGATHERQQ_XMMu64_MEMq_XMMi64_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+{
+ICLASS    : VPGATHERQD
+CPL       : 3
+CATEGORY  : AVX2GATHER
+EXTENSION : AVX2GATHER
+ATTRIBUTES : gather QWORD_INDICES ELEMENT_SIZE_d SPECIAL_AGEN_REQUIRED
+EXCEPTIONS: avx-type-12
+
+# VL = 256 - when data/mask differ from index size see asterisks in above chart.
+PATTERN : VV1 0x91   VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_YMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:dq:u32   MEM0:r:d:u32 REG1=XMM_N():rw:dq:i32
+IFORM: VPGATHERQD_XMMu32_MEMd_XMMi32_VL256
+
+# VL = 128 - index, mask and dest are all XMMs
+PATTERN : VV1 0x91   VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] RM=4 VMODRM_XMM() eanot16
+OPERANDS  : REG0=XMM_R():crw:q:u32   MEM0:r:d:u32 REG1=XMM_N():rw:q:i32
+IFORM: VPGATHERQD_XMMu32_MEMd_XMMi32_VL128
+
+COMMENT: mask reg is zeroized on normal termination. mask_sz=data_sz
+}
+
+
+
+###FILE: ./datafiles/avxhsw/hsw-int256-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2017 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+
+
+{
+ICLASS    : VPABSB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x1C   VL256 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 MEM0:r:qq:i8
+
+PATTERN : VV1 0x1C   VL256 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8  REG1=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPABSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x1D   VL256 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x1D   VL256 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16  REG1=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPABSD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x1E   VL256 V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x1E   VL256 V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32  REG1=YMM_B():r:qq:i32
+}
+
+{
+ICLASS    : VPACKSSWB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x63  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x63  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPACKSSDW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x6B  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x6B  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+{
+ICLASS    : VPACKUSWB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x67  V66 V0F VL256  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x67  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPACKUSDW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x2B  V66 V0F38 VL256  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x2B  V66 V0F38 VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+
+{
+ICLASS    : VPSLLW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF1  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:dq:u64
+
+PATTERN : VV1 0xF1  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=XMM_B():r:q:u64
+}
+{
+ICLASS    : VPSLLD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF2  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:dq:u64
+
+PATTERN : VV1 0xF2  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=XMM_B():r:q:u64
+}
+{
+ICLASS    : VPSLLQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF3  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0xF3  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=XMM_B():r:q:u64
+}
+
+{
+ICLASS    : VPSRLW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xD1  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:dq:u64
+
+PATTERN : VV1 0xD1  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=XMM_B():r:q:u64
+}
+{
+ICLASS    : VPSRLD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xD2  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:dq:u64
+
+PATTERN : VV1 0xD2  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=XMM_B():r:q:u64
+}
+{
+ICLASS    : VPSRLQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xD3  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:dq:u64
+
+PATTERN : VV1 0xD3  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=XMM_B():r:q:u64
+}
+
+{
+ICLASS    : VPSRAW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE1  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:dq:u64
+
+PATTERN : VV1 0xE1  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=XMM_B():r:q:u64
+}
+{
+ICLASS    : VPSRAD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE2  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:dq:u64
+
+PATTERN : VV1 0xE2  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=XMM_B():r:q:u64
+}
+
+
+{
+ICLASS    : VPADDB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xFC  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0xFC  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPADDW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xFD  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xFD  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPADDD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xFE  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0xFE  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+{
+ICLASS    : VPADDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xD4  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i64 MEM0:r:qq:i64
+
+PATTERN : VV1 0xD4  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i64 REG2=YMM_B():r:qq:i64
+}
+
+{
+ICLASS    : VPADDSB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xEC  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0xEC  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPADDSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xED  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xED  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+
+{
+ICLASS    : VPADDUSB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xDC  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0xDC  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPADDUSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xDD  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0xDD  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+
+{
+ICLASS    : VPAVGB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE0  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0xE0  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPAVGW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE3  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0xE3  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+
+
+{
+ICLASS    : VPCMPEQB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x74  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0x74  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPCMPEQW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x75  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0x75  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+{
+ICLASS    : VPCMPEQD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x76  V66 V0F VL256  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:qq:u32
+
+PATTERN : VV1 0x76  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=YMM_B():r:qq:u32
+}
+{
+ICLASS    : VPCMPEQQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x29  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x29  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=YMM_B():r:qq:u64
+}
+
+{
+ICLASS    : VPCMPGTB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x64  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0x64  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPCMPGTW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x65  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x65  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPCMPGTD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x66  V66 V0F VL256  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x66  V66 V0F VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+{
+ICLASS    : VPCMPGTQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x37  V66 V0F38 VL256  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i64 MEM0:r:qq:i64
+
+PATTERN : VV1 0x37  V66 V0F38 VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i64 REG2=YMM_B():r:qq:i64
+}
+
+
+{
+ICLASS    : VPHADDW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x01  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x01  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPHADDD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x02  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x02  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+{
+ICLASS    : VPHADDSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x03  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x03  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPHSUBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x05  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x05  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPHSUBD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x06  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x06  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+{
+ICLASS    : VPHSUBSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x07  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x07  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+
+{
+ICLASS    : VPMADDWD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF5  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xF5  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPMADDUBSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x04  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:u8 MEM0:r:qq:i8
+
+PATTERN : VV1 0x04  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:i8
+}
+
+{
+ICLASS    : VPMAXSB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x3C  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0x3C  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPMAXSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xEE  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xEE  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPMAXSD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x3D  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x3D  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+
+{
+ICLASS    : VPMAXUB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xDE  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0xDE  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPMAXUW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x3E  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0x3E  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+{
+ICLASS    : VPMAXUD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x3F  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:qq:u32
+
+PATTERN : VV1 0x3F  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=YMM_B():r:qq:u32
+}
+
+{
+ICLASS    : VPMINSB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x38  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0x38  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPMINSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xEA  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xEA  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPMINSD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x39  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x39  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+
+{
+ICLASS    : VPMINUB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xDA  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0xDA  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPMINUW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x3A  V66 V0F38 VL256  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0x3A  V66 V0F38 VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+{
+ICLASS    : VPMINUD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x3B  V66 V0F38 VL256  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:qq:u32
+
+PATTERN : VV1 0x3B  V66 V0F38 VL256 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=YMM_B():r:qq:u32
+}
+
+{
+ICLASS    : VPMULHUW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE4  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0xE4  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+{
+ICLASS    : VPMULHRSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x0B  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x0B  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+
+{
+ICLASS    : VPMULHW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE5  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xE5  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPMULLW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xD5  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xD5  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPMULLD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x40  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x40  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+
+{
+ICLASS    : VPMULUDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF4  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u32 MEM0:r:qq:u32
+
+PATTERN : VV1 0xF4  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u32 REG2=YMM_B():r:qq:u32
+}
+{
+ICLASS    : VPMULDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x28  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x28  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+
+{
+ICLASS    : VPSADBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF6  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0xF6  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPSHUFB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x00  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0x00  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+
+{
+ICLASS    : VPSIGNB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x08  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0x08  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPSIGNW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x09  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0x09  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPSIGND
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x0A  VL256 V66 V0F38  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0x0A  VL256 V66 V0F38 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+
+
+{
+ICLASS    : VPSUBSB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE8  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0xE8  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPSUBSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xE9  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xE9  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+
+{
+ICLASS    : VPSUBUSB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xD8  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0xD8  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPSUBUSW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xD9  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0xD9  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+
+{
+ICLASS    : VPSUBB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF8  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 MEM0:r:qq:i8
+
+PATTERN : VV1 0xF8  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i8 REG1=YMM_N():r:qq:i8 REG2=YMM_B():r:qq:i8
+}
+{
+ICLASS    : VPSUBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xF9  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 MEM0:r:qq:i16
+
+PATTERN : VV1 0xF9  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16 REG1=YMM_N():r:qq:i16 REG2=YMM_B():r:qq:i16
+}
+{
+ICLASS    : VPSUBD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xFA  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 MEM0:r:qq:i32
+
+PATTERN : VV1 0xFA  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32 REG1=YMM_N():r:qq:i32 REG2=YMM_B():r:qq:i32
+}
+{
+ICLASS    : VPSUBQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xFB  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i64 MEM0:r:qq:i64
+
+PATTERN : VV1 0xFB  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i64 REG1=YMM_N():r:qq:i64 REG2=YMM_B():r:qq:i64
+}
+
+{
+ICLASS    : VPUNPCKHBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x68  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0x68  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPUNPCKHWD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x69  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0x69  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+{
+ICLASS    : VPUNPCKHDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x6A  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:qq:u32
+
+PATTERN : VV1 0x6A  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=YMM_B():r:qq:u32
+}
+{
+ICLASS    : VPUNPCKHQDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x6D  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x6D  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=YMM_B():r:qq:u64
+}
+
+{
+ICLASS    : VPUNPCKLBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x60  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8
+
+PATTERN : VV1 0x60  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8
+}
+{
+ICLASS    : VPUNPCKLWD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x61  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16
+
+PATTERN : VV1 0x61  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16
+}
+{
+ICLASS    : VPUNPCKLDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x62  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 MEM0:r:qq:u32
+
+PATTERN : VV1 0x62  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_N():r:qq:u32 REG2=YMM_B():r:qq:u32
+}
+{
+ICLASS    : VPUNPCKLQDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x6C  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 MEM0:r:qq:u64
+
+PATTERN : VV1 0x6C  VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_N():r:qq:u64 REG2=YMM_B():r:qq:u64
+}
+
+
+{
+ICLASS    : VPALIGNR
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x0F  VL256 V66 V0F3A  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8 IMM0:r:b
+
+PATTERN : VV1 0x0F  VL256 V66 V0F3A  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8 IMM0:r:b
+}
+{
+ICLASS    : VPBLENDW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x0E  VL256 V66 V0F3A  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 MEM0:r:qq:u16 IMM0:r:b
+
+PATTERN : VV1 0x0E  VL256 V66 V0F3A  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u16 REG2=YMM_B():r:qq:u16 IMM0:r:b
+}
+{
+ICLASS    : VMPSADBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x42  VL256 V66 V0F3A  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8 IMM0:r:b
+
+PATTERN : VV1 0x42  VL256 V66 V0F3A  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8 IMM0:r:b
+}
+
+
+
+{
+ICLASS    : VPOR
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xEB  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 MEM0:r:qq:u256
+
+PATTERN : VV1 0xEB   VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 REG2=YMM_B():r:qq:u256
+}
+{
+ICLASS    : VPAND
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xDB  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 MEM0:r:qq:u256
+
+PATTERN : VV1 0xDB   VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 REG2=YMM_B():r:qq:u256
+}
+{
+ICLASS    : VPANDN
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xDF  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 MEM0:r:qq:u256
+
+PATTERN : VV1 0xDF   VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 REG2=YMM_B():r:qq:u256
+}
+{
+ICLASS    : VPXOR
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0xEF  VL256 V66 V0F  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 MEM0:r:qq:u256
+
+PATTERN : VV1 0xEF   VL256 V66 V0F MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u256 REG1=YMM_N():r:qq:u256 REG2=YMM_B():r:qq:u256
+}
+
+
+
+{
+ICLASS    : VPBLENDVB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x4C   VL256 V66 V0F3A norexw_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 MEM0:r:qq:u8 REG2=YMM_SE():r:qq:u8
+
+PATTERN : VV1 0x4C   VL256 V66 V0F3A norexw_prefix MOD[0b11] MOD=3 REG[rrr] RM[nnn] SE_IMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u8 REG1=YMM_N():r:qq:u8 REG2=YMM_B():r:qq:u8 REG3=YMM_SE():r:qq:u8
+}
+
+
+
+
+{
+ICLASS    : VPMOVMSKB
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0xD7  VL256 V66 V0F  NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPR32_R():w:d:u32   REG1=YMM_B():r:qq:i8
+}
+
+
+
+{
+ICLASS    : VPSHUFD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x70   VL256 V66 V0F NOVSR   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u32 MEM0:r:qq:u32  IMM0:r:b
+
+PATTERN : VV1 0x70   VL256 V66 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u32 REG1=YMM_B():r:qq:u32 IMM0:r:b
+}
+{
+ICLASS    : VPSHUFHW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x70   VL256 VF3 V0F NOVSR   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 MEM0:r:qq:u16  IMM0:r:b
+
+PATTERN : VV1 0x70   VL256 VF3 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_B():r:qq:u16 IMM0:r:b
+}
+{
+ICLASS    : VPSHUFLW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x70   VL256 VF2 V0F NOVSR   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 MEM0:r:qq:u16  IMM0:r:b
+
+PATTERN : VV1 0x70   VL256 VF2 V0F NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u16 REG1=YMM_B():r:qq:u16 IMM0:r:b
+}
+
+
+
+{
+ICLASS    : VPSRLDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x73  VL256 V66 V0F   MOD[0b11] MOD=3 REG[0b011] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u128 REG1=YMM_B():r:qq:u128 IMM0:r:b   # NDD
+}
+{
+ICLASS    : VPSLLDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x73  VL256 V66 V0F   MOD[0b11] MOD=3 REG[0b111] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u128 REG1=YMM_B():r:qq:u128 IMM0:r:b   # NDD
+}
+
+##############################################
+
+{
+ICLASS    : VPSLLW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x71   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b110] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u16 REG1=YMM_B():r:qq:u16 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSLLD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x72   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b110] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u32 REG1=YMM_B():r:qq:u32 IMM0:r:b  #NDD
+}
+{
+ICLASS    : VPSLLQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x73   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b110] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u64 REG1=YMM_B():r:qq:u64 IMM0:r:b # NDD
+}
+
+{
+ICLASS    : VPSRAW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x71   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b100] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:i16 REG1=YMM_B():r:qq:i16 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRAD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x72   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b100] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:i32 REG1=YMM_B():r:qq:i32 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRLW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x71   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b010] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u16 REG1=YMM_B():r:qq:u16 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRLD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+
+PATTERN : VV1 0x72   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b010] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u32 REG1=YMM_B():r:qq:u32 IMM0:r:b # NDD
+}
+{
+ICLASS    : VPSRLQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-7
+PATTERN : VV1 0x73   VL256  V66 V0F MOD[0b11] MOD=3 REG[0b010] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_N():w:qq:u64 REG1=YMM_B():r:qq:u64 IMM0:r:b  # NDD
+}
+
+
+
+############################################################################
+# SX versions
+############################################################################
+
+{
+ICLASS    : VPMOVSXBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x20   VL256  V66 V0F38 NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i16   REG1=XMM_B():r:dq:i8
+PATTERN : VV1 0x20   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i16   MEM0:r:dq:i8
+}
+
+############################################################################
+{
+ICLASS    : VPMOVSXBD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x21   VL256  V66 V0F38 NOVSR  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32   REG1=XMM_B():r:q:i8
+PATTERN : VV1 0x21   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32   MEM0:r:q:i8
+}
+############################################################################
+{
+ICLASS    : VPMOVSXBQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x22   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i64   REG1=XMM_B():r:d:i8
+PATTERN : VV1 0x22   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i64   MEM0:r:d:i8
+}
+############################################################################
+{
+ICLASS    : VPMOVSXWD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x23   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i32   REG1=XMM_B():r:dq:i16
+PATTERN : VV1 0x23   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i32   MEM0:r:dq:i16
+}
+############################################################################
+{
+ICLASS    : VPMOVSXWQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x24   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i64   REG1=XMM_B():r:q:i16
+PATTERN : VV1 0x24   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i64   MEM0:r:q:i16
+}
+############################################################################
+{
+ICLASS    : VPMOVSXDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x25   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:i64   REG1=XMM_B():r:dq:i32
+PATTERN : VV1 0x25   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:i64   MEM0:r:dq:i32
+}
+
+
+
+
+
+############################################################################
+# ZX versions
+############################################################################
+
+{
+ICLASS    : VPMOVZXBW
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x30   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16   REG1=XMM_B():r:dq:u8
+PATTERN : VV1 0x30   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16   MEM0:r:dq:u8
+}
+
+############################################################################
+{
+ICLASS    : VPMOVZXBD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x31   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32   REG1=XMM_B():r:q:u8
+PATTERN : VV1 0x31   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32   MEM0:r:q:u8
+}
+############################################################################
+{
+ICLASS    : VPMOVZXBQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x32   V66  V0F38 VL256 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64  REG1=XMM_B():r:d:u8
+PATTERN : VV1 0x32   V66  V0F38 VL256 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64   MEM0:r:d:u8
+}
+############################################################################
+{
+ICLASS    : VPMOVZXWD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x33   V66  V0F38 VL256 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32   REG1=XMM_B():r:dq:u16
+PATTERN : VV1 0x33   V66  V0F38 VL256 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32   MEM0:r:dq:u16
+}
+############################################################################
+{
+ICLASS    : VPMOVZXWQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x34   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64   REG1=XMM_B():r:q:u16
+PATTERN : VV1 0x34   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64   MEM0:r:q:u16
+}
+############################################################################
+{
+ICLASS    : VPMOVZXDQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-5
+PATTERN : VV1 0x35   VL256  V66 V0F38 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64   REG1=XMM_B():r:dq:u32
+PATTERN : VV1 0x35   VL256  V66 V0F38 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64   MEM0:r:dq:u32
+}
+
+
+##################################
+# newer stuff 2009-08-14
+
+
+{
+ICLASS    : VINSERTI128
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+PATTERN : VV1 0x38  VL256 V66 V0F3A W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u128 REG1=YMM_N():r:qq:u128 MEM0:r:dq:u128 IMM0:r:b
+
+PATTERN : VV1 0x38  VL256 V66 V0F3A W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u128 REG1=YMM_N():r:qq:u128 REG2=XMM_B():r:dq:u128 IMM0:r:b
+}
+
+
+
+
+
+{
+ICLASS    : VEXTRACTI128
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+PATTERN : VV1 0x39  VL256 V66 V0F3A W0  NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : MEM0:w:dq:u128 REG0=YMM_R():r:qq:u128  IMM0:r:b
+
+PATTERN : VV1 0x39  VL256 V66 V0F3A W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_B():w:dq:u128 REG1=YMM_R():r:qq:u128  IMM0:r:b
+}
+
+
+###########################################################################
+
+### # VPMASKMOVD  masked load and store
+### # VPMASKMOVQ  masked load and store
+
+
+
+
+{
+ICLASS    : VPMASKMOVD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+ATTRIBUTES: maskop
+EXCEPTIONS: avx-type-6
+PATTERN : VV1 0x8C  VL128 V66 V0F38 W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : REG0=XMM_R():w:dq:u32  REG1=XMM_N():r:dq:u32  MEM0:r:dq:u32
+
+
+PATTERN : VV1 0x8C  VL256 V66 V0F38 W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : REG0=YMM_R():w:qq:u32  REG1=YMM_N():r:qq:u32  MEM0:r:qq:u32
+}
+{
+ICLASS    : VPMASKMOVQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+ATTRIBUTES: maskop
+EXCEPTIONS: avx-type-6
+
+PATTERN : VV1 0x8C  VL128 V66 V0F38 W1  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : REG0=XMM_R():w:dq:u64  REG1=XMM_N():r:dq:u64  MEM0:r:dq:u64
+
+
+PATTERN : VV1 0x8C  VL256 V66 V0F38 W1  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : REG0=YMM_R():w:qq:u64  REG1=YMM_N():r:qq:u64  MEM0:r:qq:u64
+}
+
+{
+ICLASS    : VPMASKMOVD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+ATTRIBUTES: maskop
+EXCEPTIONS: avx-type-6
+PATTERN : VV1 0x8E  VL128 V66 V0F38 W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS :  MEM0:w:dq:u32  REG0=XMM_N():r:dq:u32  REG1=XMM_R():r:dq:u32
+
+
+PATTERN : VV1 0x8E  VL256 V66 V0F38 W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:qq:u32  REG0=YMM_N():r:qq:u32  REG1=YMM_R():r:qq:u32
+}
+{
+ICLASS    : VPMASKMOVQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+ATTRIBUTES: maskop
+EXCEPTIONS: avx-type-6
+PATTERN : VV1 0x8E  VL128 V66 V0F38 W1  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS :  MEM0:w:dq:u64  REG0=XMM_N():r:dq:u64  REG1=XMM_R():r:dq:u64
+
+
+PATTERN : VV1 0x8E  VL256 V66 V0F38 W1  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : MEM0:w:qq:u64  REG0=YMM_N():r:qq:u64  REG1=YMM_R():r:qq:u64
+}
+###########################################################################
+
+
+### # VPERM2I128 256b only
+
+{
+ICLASS    : VPERM2I128
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6 # Note: vperm2f128 is type 4...
+
+PATTERN : VV1 0x46  VL256 V66 V0F3A W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u128  REG1=YMM_N():r:qq:u128  MEM0:r:qq:u128         IMM0:r:b
+
+PATTERN : VV1 0x46  VL256 V66 V0F3A W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u128  REG1=YMM_N():r:qq:u128  REG2=YMM_B():r:qq:u128 IMM0:r:b
+}
+
+
+{
+ICLASS    : VPERMQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+
+PATTERN : VV1 0x00 VL256 V0F3A V66 W1 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u64 MEM0:r:qq:u64  IMM0:r:b
+
+PATTERN : VV1 0x00 VL256 V0F3A V66 W1 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u64 REG1=YMM_B():r:qq:u64 IMM0:r:b
+}
+
+{
+ICLASS    : VPERMPD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+
+PATTERN : VV1 0x01 VL256 V0F3A V66 W1 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 MEM0:r:qq:f64  IMM0:r:b
+
+PATTERN : VV1 0x01 VL256 V0F3A V66 W1 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:f64 REG1=YMM_B():r:qq:f64 IMM0:r:b
+}
+
+
+
+
+
+
+
+
+{
+ICLASS    : VPERMD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+
+
+PATTERN : VV1 0x36  VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : REG0=YMM_R():w:qq:u32  REG1=YMM_N():r:qq:u32  MEM0:r:qq:u32
+
+PATTERN : VV1 0x36  VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS : REG0=YMM_R():w:qq:u32  REG1=YMM_N():r:qq:u32  REG2=YMM_B():r:qq:u32
+}
+{
+ICLASS    : VPERMPS
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+
+PATTERN : VV1 0x16  VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS : REG0=YMM_R():w:qq:f32  REG1=YMM_N():r:qq:f32  MEM0:r:qq:f32
+
+PATTERN : VV1 0x16  VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS : REG0=YMM_R():w:qq:f32  REG1=YMM_N():r:qq:f32  REG2=YMM_B():r:qq:f32
+}
+
+
+###########################################################################
+
+
+### # VPBLENDD imm 128/256
+
+
+
+{
+ICLASS    : VPBLENDD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+
+PATTERN : VV1 0x02  VL128 V66 V0F3A W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u32  REG1=XMM_N():r:dq:u32  MEM0:r:dq:u32         IMM0:r:b
+
+PATTERN : VV1 0x02  VL128 V66 V0F3A W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=XMM_R():w:dq:u32  REG1=XMM_N():r:dq:u32  REG2=XMM_B():r:dq:u32 IMM0:r:b
+
+
+PATTERN : VV1 0x02  VL256 V66 V0F3A W0  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u32  REG1=YMM_N():r:qq:u32  MEM0:r:qq:u32         IMM0:r:b
+
+PATTERN : VV1 0x02  VL256 V66 V0F3A W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn] UIMM8()
+OPERANDS  : REG0=YMM_R():w:qq:u32  REG1=YMM_N():r:qq:u32  REG2=YMM_B():r:qq:u32 IMM0:r:b
+}
+
+
+
+###########################################################################
+
+{
+ICLASS    : VPBROADCASTB
+COMMENT : gpr 128/256
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+
+PATTERN : VV1 0x78 VL128 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u8 MEM0:r:b:u8 EMX_BROADCAST_1TO16_8
+
+PATTERN : VV1 0x78 VL128 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u8  REG1=XMM_B():r:b:u8 EMX_BROADCAST_1TO16_8
+
+PATTERN : VV1 0x78 VL256 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u8 MEM0:r:b:u8 EMX_BROADCAST_1TO32_8
+
+PATTERN : VV1 0x78 VL256 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u8  REG1=XMM_B():r:b:u8 EMX_BROADCAST_1TO32_8
+
+}
+
+
+
+
+{
+ICLASS    : VPBROADCASTW
+COMMENT : gpr 128/256
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+
+PATTERN : VV1 0x79 VL128 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u16 MEM0:r:w:u16 EMX_BROADCAST_1TO8_16
+
+PATTERN : VV1 0x79 VL128 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u16  REG1=XMM_B():r:w:u16  EMX_BROADCAST_1TO8_16
+
+PATTERN : VV1 0x79 VL256 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u16 MEM0:r:w:u16 EMX_BROADCAST_1TO16_16
+
+PATTERN : VV1 0x79 VL256 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u16  REG1=XMM_B():r:w:u16 EMX_BROADCAST_1TO16_16
+}
+
+
+
+
+### # VPBROADCASTD gpr/mem
+
+
+{
+ICLASS    : VPBROADCASTD
+COMMENT : gpr 128/256
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+
+PATTERN : VV1 0x58 VL128 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u32 MEM0:r:d:u32 EMX_BROADCAST_1TO4_32
+
+PATTERN : VV1 0x58 VL128 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u32  REG1=XMM_B():r:d:u32  EMX_BROADCAST_1TO4_32
+
+
+PATTERN : VV1 0x58 VL256 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u32 MEM0:r:d:u32  EMX_BROADCAST_1TO8_32
+
+PATTERN : VV1 0x58 VL256 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u32  REG1=XMM_B():r:d:u32 EMX_BROADCAST_1TO8_32
+}
+
+
+
+### # VPBROADCASTQ gpr/mem
+
+{
+ICLASS    : VPBROADCASTQ
+COMMENT : gpr 128/256
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+
+PATTERN : VV1 0x59 VL128 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq:u64 MEM0:r:q:u64 EMX_BROADCAST_1TO2_64
+
+PATTERN : VV1 0x59 VL128 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:u64  REG1=XMM_B():r:q:u64  EMX_BROADCAST_1TO2_64
+
+PATTERN : VV1 0x59 VL256 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u64 MEM0:r:q:u64 EMX_BROADCAST_1TO4_64
+
+PATTERN : VV1 0x59 VL256 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:u64  REG1=XMM_B():r:q:u64  EMX_BROADCAST_1TO4_64
+}
+
+
+
+
+
+
+{
+ICLASS    : VBROADCASTSS
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+COMMENT   : xmm,xmm and ymm,xmm
+PATTERN : VV1 0x18  VL128 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq:f32  REG1=XMM_B():r:dq:f32 EMX_BROADCAST_1TO4_32
+
+PATTERN : VV1 0x18  VL256 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f32  REG1=XMM_B():r:dq:f32 EMX_BROADCAST_1TO8_32
+}
+
+
+{
+ICLASS    : VBROADCASTSD
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+COMMENT   : ymm,xmm only
+PATTERN : VV1 0x19  VL256 V66 V0F38 W0 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq:f64  REG1=XMM_B():r:dq:f64 EMX_BROADCAST_1TO4_64
+}
+
+
+
+{
+ICLASS    : VBROADCASTI128
+CPL       : 3
+CATEGORY  : BROADCAST
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-6
+COMMENT : memonly 256  -- FIXME: make types u64 like in AVX1?
+PATTERN : VV1 0x5A VL256 V66 V0F38 W0 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq:u128  MEM0:r:dq:u128 EMX_BROADCAST_2TO4_64
+}
+
+
+###FILE: ./datafiles/avxhsw/hsw-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : TZCNT
+CPL       : 3
+CATEGORY  : BMI1
+EXTENSION : BMI1
+FLAGS     : MUST [ of-u sf-u zf-mod af-u pf-u cf-mod ]
+PATTERN   : 0x0F 0xBC refining_f3  TZCNT=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPRv_R():w MEM0:r:v
+
+PATTERN   : 0x0F 0xBC refining_f3 TZCNT=1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_R():w  REG1=GPRv_B():r
+}
+
+{
+ICLASS    : BSF
+VERSION   : 1
+COMMENT   : AMD reused 0FBC for TZCNT and made BSF not have a refining prefix.  This version replaces the normal version of BSF
+CPL       : 3
+CATEGORY  : BITBYTE
+EXTENSION : BASE
+ISA_SET   : I386
+FLAGS     : MUST [ of-u sf-u zf-mod af-u pf-u cf-u ]
+
+PATTERN   : 0x0F 0xBC not_refining_f3 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPRv_R():cw MEM0:r:v
+
+PATTERN   : 0x0F 0xBC not_refining_f3 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_R():cw REG1=GPRv_B():r
+
+PATTERN   : 0x0F 0xBC refining_f3 TZCNT=0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPRv_R():cw MEM0:r:v
+
+PATTERN   : 0x0F 0xBC refining_f3 TZCNT=0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_R():cw REG1=GPRv_B():r
+}
+
+{
+ICLASS    : INVPCID
+CPL       : 0
+CATEGORY  : MISC
+EXTENSION : INVPCID
+ISA_SET   : INVPCID
+ATTRIBUTES : RING0 NOTSX
+PATTERN   : 0x0F 0x38 0x82 osz_refining_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn]  REFINING66() mode64 MODRM() CR_WIDTH()
+OPERANDS  : REG0=GPR64_R():r MEM0:r:dq
+PATTERN   : 0x0F 0x38 0x82 osz_refining_prefix MOD[mm] MOD!=3 REG[rrr] RM[nnn]  REFINING66() not64 MODRM() CR_WIDTH()
+OPERANDS  : REG0=GPR32_R():r MEM0:r:dq
+COMMENT   :
+}
+
+
+###FILE: ./datafiles/avxhsw/hsw-lzcnt.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+# LZCNT reg16, reg/mem16 F30FBD /r
+# LZCNT reg32, reg/mem32 F30FBD /r
+# LZCNT reg64, reg/mem64 F30FBD /r
+
+{
+ICLASS    : LZCNT
+# This replaces the AMD version in LZCNT builds
+VERSION   : 2
+CPL       : 3
+CATEGORY  : LZCNT
+EXTENSION : LZCNT
+COMMENT   : This one was introduced first by AMD circa SSE4a.
+FLAGS     : MUST [ cf-mod zf-mod of-u af-u pf-u sf-u ]
+PATTERN   : 0x0F 0xBD f3_refining_prefix LZCNT=1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPRv_R():w:v     MEM0:r:v
+PATTERN   : 0x0F 0xBD f3_refining_prefix LZCNT=1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_R():w:v     REG1=GPRv_B():r:v
+}
+
+
+{
+ICLASS    : BSR
+VERSION   : 2
+COMMENT   : AMD reused 0FBD for LZCNT and made BSR not have a refining prefix.  This version replaces the normal version of BSR
+CPL       : 3
+CATEGORY  : BITBYTE
+EXTENSION : BASE
+ISA_SET   : I386
+FLAGS     : MUST [ of-u sf-u zf-mod af-u pf-u cf-u ]
+PATTERN   : 0x0F 0xBD not_refining_f3 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPRv_R():cw MEM0:r:v
+
+PATTERN   : 0x0F 0xBD not_refining_f3 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_R():cw REG1=GPRv_B():r
+
+PATTERN   : 0x0F 0xBD  refining_f3 LZCNT=0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=GPRv_R():cw MEM0:r:v
+
+PATTERN   : 0x0F 0xBD  refining_f3 LZCNT=0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=GPRv_R():cw REG1=GPRv_B():r
+}
+
+
+###FILE: ./datafiles/avxhsw/hsw-vex-gpr-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+AVX_INSTRUCTIONS()::
+
+{
+ICLASS    : PDEP
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+
+#32b
+PATTERN   : VV1 0xF5 V0F38 VF2 not64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d MEM0:r:d
+
+PATTERN   : VV1 0xF5 V0F38 VF2 W0 mode64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d MEM0:r:d
+
+PATTERN   : VV1 0xF5 V0F38 VF2 not64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d REG2=VGPR32_B():r:d
+
+PATTERN   : VV1 0xF5 V0F38 VF2 W0 mode64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d REG2=VGPR32_B():r:d
+
+# 64b
+PATTERN   : VV1 0xF5 V0F38 VF2 W1 VL128 mode64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():rw:q REG1=VGPR64_N():r:q MEM0:r:q
+
+PATTERN   : VV1 0xF5 V0F38 VF2 W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():rw:q REG1=VGPR64_N():r:q REG2=VGPR64_B():r:q
+}
+
+{
+ICLASS    : PEXT
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+
+
+#32b
+PATTERN   : VV1 0xF5 V0F38 VF3 not64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d MEM0:r:d
+
+PATTERN   : VV1 0xF5 V0F38 VF3 W0 mode64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d MEM0:r:d
+
+PATTERN   : VV1 0xF5 V0F38 VF3 not64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d REG2=VGPR32_B():r:d
+
+PATTERN   : VV1 0xF5 V0F38 VF3 W0 mode64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d REG2=VGPR32_B():r:d
+
+# 64b
+PATTERN   : VV1 0xF5 V0F38 VF3 W1 VL128 mode64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():rw:q REG1=VGPR64_N():r:q MEM0:r:q
+
+PATTERN   : VV1 0xF5 V0F38 VF3 W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():rw:q REG1=VGPR64_N():r:q REG2=VGPR64_B():r:q
+}
+
+
+{
+ICLASS    : ANDN
+CPL       : 3
+CATEGORY  : BMI1
+EXTENSION : BMI1
+FLAGS     : MUST [ of-0 sf-mod zf-mod af-u pf-u cf-0 ]
+
+# 32b
+PATTERN   : VV1 0xF2 V0F38 VNP  not64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d MEM0:r:d
+
+PATTERN   : VV1 0xF2 V0F38 VNP  W0 mode64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d MEM0:r:d
+
+PATTERN   : VV1 0xF2 V0F38 VNP  not64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d REG2=VGPR32_B():r:d
+
+PATTERN   : VV1 0xF2 V0F38 VNP  W0 mode64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():rw:d REG1=VGPR32_N():r:d REG2=VGPR32_B():r:d
+
+# 64b
+PATTERN   : VV1 0xF2 V0F38 VNP W1 VL128  mode64 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():rw:q REG1=VGPR64_N():r:q MEM0:r:q
+
+PATTERN   : VV1 0xF2 V0F38 VNP W1 VL128  mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():rw:q REG1=VGPR64_N():r:q REG2=VGPR64_B():r:q
+}
+
+{
+ICLASS    : BLSR
+CPL       : 3
+CATEGORY  : BMI1
+EXTENSION : BMI1
+FLAGS     : MUST [ of-0 sf-mod zf-mod af-u pf-u cf-mod ]
+
+# 32b
+PATTERN   : VV1 0xF3 V0F38 VNP not64 VL128  MOD[mm] MOD!=3 REG[0b001] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_N():w:d MEM0:r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP W0 mode64 VL128  MOD[mm] MOD!=3 REG[0b001] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_N():w:d MEM0:r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP not64 VL128  MOD[0b11] MOD=3 REG[0b001] RM[nnn]
+OPERANDS  : REG0=VGPR32_N():w:d  REG1=VGPR32_B():r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP W0 mode64 VL128  MOD[0b11] MOD=3 REG[0b001] RM[nnn]
+OPERANDS  : REG0=VGPR32_N():w:d  REG1=VGPR32_B():r:d
+
+# 64b
+PATTERN   : VV1 0xF3 V0F38 VNP W1 VL128 mode64 MOD[mm] MOD!=3 REG[0b001] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_N():w:q MEM0:r:q
+
+PATTERN   : VV1 0xF3 V0F38 VNP W1 VL128 mode64 MOD[0b11] MOD=3 REG[0b001] RM[nnn]
+OPERANDS  : REG0=VGPR64_N():w:q  REG1=VGPR64_B():r:q
+
+}
+
+{
+ICLASS    : BLSMSK
+CPL       : 3
+CATEGORY  : BMI1
+EXTENSION : BMI1
+FLAGS     : MUST [ of-0 sf-mod zf-0 af-u pf-u cf-mod ]
+
+#32b
+PATTERN   : VV1 0xF3 V0F38 VNP not64 VL128 MOD[mm] MOD!=3 REG[0b010] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_N():w:d MEM0:r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP W0 mode64 VL128 MOD[mm] MOD!=3 REG[0b010] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_N():w:d MEM0:r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP not64 VL128 MOD[0b11] MOD=3 REG[0b010] RM[nnn]
+OPERANDS  : REG0=VGPR32_N():w:d  REG1=VGPR32_B():r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP W0 mode64 VL128 MOD[0b11] MOD=3 REG[0b010] RM[nnn]
+OPERANDS  : REG0=VGPR32_N():w:d  REG1=VGPR32_B():r:d
+
+#64b
+PATTERN   : VV1 0xF3 V0F38 VNP W1 VL128 mode64 MOD[mm] MOD!=3 REG[0b010] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_N():w:q MEM0:r:q
+
+PATTERN   : VV1 0xF3 V0F38 VNP W1 VL128 mode64 MOD[0b11] MOD=3 REG[0b010] RM[nnn]
+OPERANDS  : REG0=VGPR64_N():w:q  REG1=VGPR64_B():r:q
+}
+
+{
+ICLASS    : BLSI
+CPL       : 3
+CATEGORY  : BMI1
+EXTENSION : BMI1
+FLAGS     : MUST [ of-0 sf-mod zf-mod af-u pf-u cf-0 ]
+
+# 32b
+PATTERN   : VV1 0xF3 V0F38 VNP not64 VL128 MOD[mm] MOD!=3 REG[0b011] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_N():w:d MEM0:r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP W0 mode64 VL128 MOD[mm] MOD!=3 REG[0b011] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_N():w:d MEM0:r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP not64 VL128 MOD[0b11] MOD=3 REG[0b011] RM[nnn]
+OPERANDS  : REG0=VGPR32_N():w:d  REG1=VGPR32_B():r:d
+
+PATTERN   : VV1 0xF3 V0F38 VNP W0 mode64 VL128 MOD[0b11] MOD=3 REG[0b011] RM[nnn]
+OPERANDS  : REG0=VGPR32_N():w:d  REG1=VGPR32_B():r:d
+
+# 64b
+PATTERN   : VV1 0xF3 V0F38 VNP W1 VL128 mode64 MOD[mm] MOD!=3 REG[0b011] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_N():w:q MEM0:r:q
+
+PATTERN   : VV1 0xF3 V0F38 VNP W1 VL128 mode64 MOD[0b11] MOD=3 REG[0b011] RM[nnn]
+OPERANDS  : REG0=VGPR64_N():w:q  REG1=VGPR64_B():r:q
+}
+
+{
+ICLASS    : BZHI
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+FLAGS     : MUST [ of-0 sf-mod zf-mod af-u pf-u cf-mod ]
+
+# 32b
+PATTERN   : VV1 0xF5 V0F38 VNP not64 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF5 V0F38 VNP W0 mode64 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF5 V0F38 VNP not64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF5 V0F38 VNP W0 mode64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+# 64b
+PATTERN   : VV1 0xF5 V0F38 VNP W1 VL128 mode64  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():w:q  MEM0:r:q REG1=VGPR64_N():r:q
+
+PATTERN   : VV1 0xF5 V0F38 VNP W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_B():r:q REG2=VGPR64_N():r:q
+}
+
+{
+ICLASS    : BEXTR
+CPL       : 3
+CATEGORY  : BMI1
+EXTENSION : BMI1
+FLAGS     : MUST [ of-u sf-u zf-mod af-u pf-u cf-u ]
+
+# 32b
+PATTERN   : VV1 0xF7 V0F38 VNP not64 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VNP W0 mode64 VL128  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VNP not64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VNP W0 mode64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+# 64b
+PATTERN   : VV1 0xF7 V0F38 VNP W1 VL128 mode64  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():w:q  MEM0:r:q REG1=VGPR64_N():r:q
+
+PATTERN   : VV1 0xF7 V0F38 VNP W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_B():r:q REG2=VGPR64_N():r:q
+}
+
+
+
+{
+ICLASS    : SHLX
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+
+# 32b
+PATTERN   : VV1 0xF7 V0F38 V66 not64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 V66 W0 mode64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 V66 not64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 V66 W0 mode64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+# 64b
+PATTERN   : VV1 0xF7 V0F38 V66  W1 VL128 mode64  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():w:q MEM0:r:q REG1=VGPR64_N():r:q
+
+PATTERN   : VV1 0xF7 V0F38 V66  W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_B():r:q  REG2=VGPR64_N():r:q
+}
+{
+ICLASS    : SARX
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+
+# 32b
+PATTERN   : VV1 0xF7 V0F38 VF3 not64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VF3 W0 mode64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VF3 not64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VF3 W0 mode64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+# 64b
+PATTERN   : VV1 0xF7 V0F38 VF3  W1 VL128 mode64  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():w:q MEM0:r:q REG1=VGPR64_N():r:q
+
+PATTERN   : VV1 0xF7 V0F38 VF3  W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_B():r:q  REG2=VGPR64_N():r:q
+}
+{
+ICLASS    : SHRX
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+
+# 32b
+PATTERN   : VV1 0xF7 V0F38 VF2 not64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VF2 W0 mode64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d REG1=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VF2 not64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+PATTERN   : VV1 0xF7 V0F38 VF2 W0 mode64 VL128  MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d REG2=VGPR32_N():r:d
+
+# 64b
+PATTERN   : VV1 0xF7 V0F38 VF2  W1 VL128 mode64  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():w:q MEM0:r:q REG1=VGPR64_N():r:q
+
+PATTERN   : VV1 0xF7 V0F38 VF2  W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_B():r:q  REG2=VGPR64_N():r:q
+}
+
+
+
+{
+ICLASS    : MULX
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+
+# reg:w vvvv:w rm:r rdx:r
+# 32b
+PATTERN   : VV1 0xF6 VF2 V0F38 not64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_N():w:d REG2=VGPR32_B():r:d REG3=XED_REG_EDX:r:SUPP
+
+PATTERN   : VV1 0xF6 VF2 V0F38 W0 mode64 VL128 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_N():w:d REG2=VGPR32_B():r:d REG3=XED_REG_EDX:r:SUPP
+PATTERN   : VV1 0xF6 VF2 V0F38 not64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_N():w:d MEM0:r:d  REG2=XED_REG_EDX:r:SUPP
+
+PATTERN   : VV1 0xF6 VF2 V0F38 W0 mode64 VL128 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_N():w:d MEM0:r:d  REG2=XED_REG_EDX:r:SUPP
+
+# 64b
+PATTERN   : VV1 0xF6 VF2 V0F38 W1 VL128 mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_N():w:q REG2=VGPR64_B():r:q REG3=XED_REG_RDX:r:SUPP
+PATTERN   : VV1 0xF6 VF2 V0F38 W1 VL128 mode64  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_N():w:q MEM0:r:q REG2=XED_REG_RDX:r:SUPP
+}
+
+{
+ICLASS    : RORX
+CPL       : 3
+CATEGORY  : BMI2
+EXTENSION : BMI2
+
+# reg(w) rm(r) / vvvv must be 1111. / 2010-01-08 CART change
+
+# 32b
+PATTERN   : VV1 0xF0 VF2 V0F3A not64 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]  UIMM8()
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d IMM0:r:b
+
+PATTERN   : VV1 0xF0 VF2 V0F3A W0 mode64 VL128 NOVSR MOD[0b11] MOD=3 REG[rrr] RM[nnn]  UIMM8()
+OPERANDS  : REG0=VGPR32_R():w:d REG1=VGPR32_B():r:d IMM0:r:b
+PATTERN   : VV1 0xF0 VF2 V0F3A not64 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM() UIMM8()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d IMM0:r:b
+
+PATTERN   : VV1 0xF0 VF2 V0F3A W0 mode64 VL128 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM() UIMM8()
+OPERANDS  : REG0=VGPR32_R():w:d MEM0:r:d IMM0:r:b
+
+# 64b
+PATTERN   : VV1 0xF0 VF2 V0F3A W1 VL128 NOVSR mode64 MOD[0b11] MOD=3 REG[rrr] RM[nnn]  UIMM8()
+OPERANDS  : REG0=VGPR64_R():w:q REG1=VGPR64_B():r:q IMM0:r:b
+PATTERN   : VV1 0xF0 VF2 V0F3A W1 VL128 NOVSR mode64 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM() UIMM8()
+OPERANDS  : REG0=VGPR64_R():w:q MEM0:r:q IMM0:r:b
+}
+
+
+###FILE: ./datafiles/avxhsw/hsw-vshift-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+
+
+
+
+{
+ICLASS    : VPSLLVD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x47  VL128 V0F38 V66  W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x47  VL128 V0F38 V66 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x47  VL256 V0F38 V66  W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x47  VL256 V0F38 V66 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+
+}
+{
+ICLASS    : VPSLLVQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x47  VL128 V0F38 V66  W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x47  VL128 V0F38 V66 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x47  VL256 V0F38 V66  W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x47  VL256 V0F38 V66 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+
+}
+
+{
+ICLASS    : VPSRLVD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x45  VL128 V0F38 V66  W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x45  VL128 V0F38 V66 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x45  VL256 V0F38 V66  W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x45  VL256 V0F38 V66 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+
+}
+{
+ICLASS    : VPSRLVQ
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x45  VL128 V0F38 V66  W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x45  VL128 V0F38 V66 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x45  VL256 V0F38 V66  W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x45  VL256 V0F38 V66 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+
+}
+
+{
+ICLASS    : VPSRAVD
+CPL       : 3
+CATEGORY  : AVX2
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-4
+PATTERN : VV1 0x46  VL128 V0F38 V66  W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq MEM0:r:dq
+
+PATTERN : VV1 0x46  VL128 V0F38 V66 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():w:dq REG1=XMM_N():r:dq REG2=XMM_B():r:dq
+
+PATTERN : VV1 0x46  VL256 V0F38 V66  W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq MEM0:r:qq
+
+PATTERN : VV1 0x46  VL256 V0F38 V66 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():w:qq REG1=YMM_N():r:qq REG2=YMM_B():r:qq
+
+}
+
+
+
+
+###FILE: ./datafiles/avxhsw/movnt-load-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+
+
+{
+ICLASS    : VMOVNTDQA
+CPL       : 3
+CATEGORY  : DATAXFER
+EXTENSION : AVX2
+EXCEPTIONS: avx-type-1
+ATTRIBUTES :  REQUIRES_ALIGNMENT NOTSX NONTEMPORAL
+
+PATTERN : VV1 0x2A  V66 V0F38 VL256 NOVSR MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  :  REG0=YMM_R():w:qq MEM0:r:qq
+}
+
+
+
+
+
+###FILE: ./datafiles/avxhsw/vmfunc-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : VMFUNC
+CPL       : 3
+CATEGORY  : VTX
+EXTENSION : VMFUNC
+ISA_SET   : VMFUNC
+ATTRIBUTES :
+PATTERN   : 0x0F 0x01 MOD[0b11] MOD=3 REG[0b010] RM[0b100] no_refining_prefix
+OPERANDS  : REG0=XED_REG_EAX:r:SUPP
+}
+
+
+###FILE: ./datafiles/avxhsw/rtm.xed
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2017 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : XBEGIN
+CPL       : 3
+CATEGORY  : COND_BR
+EXTENSION : RTM
+COMMENT   : Not always a branch. If it aborts, it branches and eax is written
+
+PATTERN   : 0xC7 MOD[0b11] MOD=3 REG[0b111] RM[0b000] BRDISPz()
+OPERANDS  : RELBR:r:z REG0=rIP():rw:SUPP REG1=XED_REG_EAX:cw:SUPP
+}
+
+{
+ICLASS    : XEND
+CPL       : 3
+CATEGORY  : COND_BR
+EXTENSION : RTM
+COMMENT   : Transaction end. May branch
+PATTERN   : 0x0F 0x01 MOD[0b11] MOD=3 REG[0b010] RM[0b101]  no_refining_prefix
+OPERANDS  :
+}
+
+{
+ICLASS    : XABORT
+CPL       : 3
+CATEGORY  : UNCOND_BR
+EXTENSION : RTM
+COMMENT   : Transaction abort. Branches. NOP outside of a transaction; thus eax is rcw.
+PATTERN   : 0xC6 MOD[0b11] MOD=3 REG[0b111] RM[0b000]  UIMM8()
+OPERANDS  : REG0=XED_REG_EAX:rcw:SUPP IMM0:r:b
+}
+
+
+{
+ICLASS    : XTEST
+CPL       : 3
+CATEGORY  : LOGICAL
+EXTENSION : RTM
+COMMENT   : Test if in RTM transaction mode
+FLAGS     : MUST [ of-0 sf-0 zf-mod af-0 pf-0 cf-0 ]
+PATTERN   : 0x0F 0x01 MOD[0b11] MOD=3 REG[0b010] RM[0b110]  no_refining_prefix
+OPERANDS  :
+}
+
+
+###FILE: ./datafiles/avx/avx-fma-isa.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+AVX_INSTRUCTIONS()::
+
+# Issues: the encoder is at a loss for vmaddps xmm0,xmm0,xmm0,xmm0.
+# The encoder must enforce equality between two parameters; it never had to do this before.
+#   Extra check?
+# The decoder must strip the suffixes _DDMR, _DDRM, _DRMD in disassembly (eventually).
+#############################################################################################
+# Operand orders:
+#             A  =  B   *  C     +  D
+#Type 1)   reg0  reg0  mem/reg1  reg2          DDMR  312 or 132
+#Type 2)   reg0  reg0  reg1      mem/reg2      DDRM  123 or 213
+#Type 3)   reg0  reg1  mem/reg2  reg0          DRMD  321 or 231
+
+# dst is in MODRM.REG
+# regsrc is in VEX.vvvv
+# memop is in MODRM.RM
+############################################################################################
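+# Illustration (editorial, not part of the original datafile; semantics per
+# the Intel SDM): with op1=MODRM.REG, op2=VEX.vvvv, op3=MODRM.RM, the
+# 132/213/231 suffix spells out which operands feed the multiply and the add:
+#   VFMADD132PD xmm1,xmm2,xmm3/m128:  xmm1 = xmm1*xmm3 + xmm2   (Type 1, DDMR)
+#   VFMADD213PD xmm1,xmm2,xmm3/m128:  xmm1 = xmm2*xmm1 + xmm3   (Type 2, DDRM)
+#   VFMADD231PD xmm1,xmm2,xmm3/m128:  xmm1 = xmm2*xmm3 + xmm1   (Type 3, DRMD)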
+
+
+##################################################################
+{
+ICLASS    : VFMADD132PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x98 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0x98 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0x98 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0x98 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMADD132PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x98 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0x98 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0x98 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0x98 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMADD132SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x99 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0x99 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+}
+{
+ICLASS    : VFMADD132SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x99  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0x99  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+{
+ICLASS    : VFMADD213PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xA8 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64    MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xA8 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xA8 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64    MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xA8 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMADD213PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xA8 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xA8 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0xA8 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xA8 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMADD213SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xA9  V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64     MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xA9  V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFMADD213SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xA9  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32     MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xA9  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VFMADD231PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xB8 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xB8 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xB8 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xB8 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+
+}
+{
+ICLASS    : VFMADD231PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xB8 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xB8 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+# R/M 256
+PATTERN : VV1 0xB8 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xB8 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+
+}
+{
+ICLASS    : VFMADD231SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xB9 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xB9 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFMADD231SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xB9 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xB9 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+
+###################################################
+{
+ICLASS    : VFMADDSUB132PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x96 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0x96 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0x96 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0x96 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMADDSUB213PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xA6 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64    MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xA6 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xA6 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64    MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xA6 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMADDSUB231PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xB6 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xB6 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xB6 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xB6 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+
+}
+
+{
+ICLASS    : VFMADDSUB132PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x96 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0x96 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0x96 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0x96 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMADDSUB213PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xA6 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xA6 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0xA6 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xA6 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMADDSUB231PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xB6 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xB6 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+# R/M 256
+PATTERN : VV1 0xB6 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xB6 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+
+}
+###################################################
+
+{
+ICLASS    : VFMSUBADD132PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x97 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0x97 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0x97 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0x97 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMSUBADD213PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xA7 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64    MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xA7 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xA7 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64    MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xA7 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMSUBADD231PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xB7 VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xB7 VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xB7 VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xB7 VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+
+}
+
+{
+ICLASS    : VFMSUBADD132PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x97 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0x97 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0x97 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0x97 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMSUBADD213PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xA7 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xA7 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0xA7 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xA7 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMSUBADD231PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xB7 VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xB7 VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+# R/M 256
+PATTERN : VV1 0xB7 VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xB7 VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+
+}
+
+
+###################################################
+
+{
+ICLASS    : VFMSUB132PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x9A VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0x9A VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0x9A VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0x9A VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMSUB132PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x9A VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0x9A VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0x9A VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0x9A VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMSUB132SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x9B V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0x9B V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+}
+{
+ICLASS    : VFMSUB132SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x9B  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0x9B  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+{
+ICLASS    : VFMSUB213PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xAA VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64    MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xAA VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xAA VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64    MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xAA VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFMSUB213PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xAA VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xAA VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0xAA VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xAA VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFMSUB213SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xAB  V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64     MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xAB  V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFMSUB213SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xAB  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32     MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xAB  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VFMSUB231PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xBA VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xBA VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xBA VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xBA VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+
+}
+{
+ICLASS    : VFMSUB231PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xBA VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xBA VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+# R/M 256
+PATTERN : VV1 0xBA VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xBA VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+
+}
+{
+ICLASS    : VFMSUB231SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xBB V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xBB V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFMSUB231SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xBB V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xBB V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+###################################################
+
+
+{
+ICLASS    : VFNMADD132PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x9C VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0x9C VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0x9C VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0x9C VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFNMADD132PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x9C VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0x9C VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0x9C VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0x9C VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFNMADD132SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x9D V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0x9D V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+}
+{
+ICLASS    : VFNMADD132SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x9D  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0x9D  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+{
+ICLASS    : VFNMADD213PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xAC VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64    MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xAC VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xAC VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64    MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xAC VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFNMADD213PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xAC VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xAC VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0xAC VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xAC VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFNMADD213SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xAD  V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64     MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xAD  V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFNMADD213SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xAD  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32     MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xAD  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VFNMADD231PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xBC VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xBC VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xBC VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xBC VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+
+}
+{
+ICLASS    : VFNMADD231PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xBC VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xBC VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+# R/M 256
+PATTERN : VV1 0xBC VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xBC VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+
+}
+{
+ICLASS    : VFNMADD231SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xBD V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xBD V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFNMADD231SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xBD V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xBD V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+###################################################
+
+
+{
+ICLASS    : VFNMSUB132PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x9E VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0x9E VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0x9E VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0x9E VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFNMSUB132PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0x9E VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0x9E VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0x9E VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0x9E VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFNMSUB132SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x9F V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0x9F V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+}
+{
+ICLASS    : VFNMSUB132SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0x9F  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0x9F  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+{
+ICLASS    : VFNMSUB213PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xAE VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64    MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xAE VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xAE VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64    MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xAE VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+}
+{
+ICLASS    : VFNMSUB213PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xAE VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xAE VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+
+# R/M 256
+PATTERN : VV1 0xAE VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xAE VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+}
+{
+ICLASS    : VFNMSUB213SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xAF  V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64     MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xAF  V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFNMSUB213SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xAF  V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32     MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xAF  V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+}
+
+{
+ICLASS    : VFNMSUB231PD
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xBE VL128 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 MEM0:r:dq:f64
+# R/R 128
+PATTERN : VV1 0xBE VL128 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:dq:f64 REG2=XMM_B():r:dq:f64
+
+
+# R/M 256
+PATTERN : VV1 0xBE VL256 V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 MEM0:r:qq:f64
+# R/R 256
+PATTERN : VV1 0xBE VL256 V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f64 REG1=YMM_N():r:qq:f64 REG2=YMM_B():r:qq:f64
+
+}
+{
+ICLASS    : VFNMSUB231PS
+EXCEPTIONS: avx-type-2
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR
+# R/M 128
+PATTERN : VV1 0xBE VL128 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 MEM0:r:dq:f32
+# R/R 128
+PATTERN : VV1 0xBE VL128 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:dq:f32 REG2=XMM_B():r:dq:f32
+
+# R/M 256
+PATTERN : VV1 0xBE VL256 V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 MEM0:r:qq:f32
+# R/R 256
+PATTERN : VV1 0xBE VL256 V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=YMM_R():rw:qq:f32 REG1=YMM_N():r:qq:f32 REG2=YMM_B():r:qq:f32
+
+}
+{
+ICLASS    : VFNMSUB231SD
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xBF V66 V0F38 W1 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 MEM0:r:q:f64
+# R/R 128
+PATTERN : VV1 0xBF V66 V0F38 W1 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f64 REG1=XMM_N():r:q:f64 REG2=XMM_B():r:q:f64
+
+}
+{
+ICLASS    : VFNMSUB231SS
+EXCEPTIONS: avx-type-3
+CPL       : 3
+CATEGORY  : VFMA
+EXTENSION : FMA
+ATTRIBUTES: MXCSR simd_scalar
+# R/M 128
+PATTERN : VV1 0xBF V66 V0F38 W0 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM()
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 MEM0:r:d:f32
+# R/R 128
+PATTERN : VV1 0xBF V66 V0F38 W0 MOD[0b11] MOD=3 REG[rrr] RM[nnn]
+OPERANDS  : REG0=XMM_R():rw:dq:f32 REG1=XMM_N():r:d:f32 REG2=XMM_B():r:d:f32
+
+}
+
+###################################################
+
+
+
+
+
+
+###FILE: ./datafiles/bdw/adox-adcx.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : ADCX
+CPL       : 3
+CATEGORY  : ADOX_ADCX
+EXTENSION : ADOX_ADCX
+ISA_SET   : ADOX_ADCX
+
+FLAGS     : MUST [ cf-tst cf-mod ]
+
+# reg:rw rm:r
+# 32b
+PATTERN   : 0x0F 0x38 0xF6  MOD[0b11] MOD=3 REG[rrr] RM[nnn] osz_refining_prefix W0  IMMUNE66()
+OPERANDS  : REG0=GPR32_R():rw:d REG1=GPR32_B():r:d
+PATTERN   : 0x0F 0x38 0xF6   MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() osz_refining_prefix W0  IMMUNE66()
+OPERANDS  : REG0=GPR32_R():rw:d MEM0:r:d
+
+# 64b
+PATTERN   : 0x0F 0x38 0xF6  MOD[0b11] MOD=3 REG[rrr] RM[nnn] osz_refining_prefix  W1 IMMUNE66()
+OPERANDS  : REG0=GPR64_R():rw:q  REG1=GPR64_B():r:q
+PATTERN   : 0x0F 0x38 0xF6  MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM() osz_refining_prefix  W1  IMMUNE66()
+OPERANDS  : REG0=GPR64_R():rw:q  MEM0:r:q
+}
+
+
+
+{
+ICLASS    : ADOX
+CPL       : 3
+CATEGORY  : ADOX_ADCX
+EXTENSION : ADOX_ADCX
+ISA_SET   : ADOX_ADCX
+
+FLAGS     : MUST [ of-tst of-mod ]
+
+# reg:rw rm:r
+# 32b
+PATTERN   : 0x0F 0x38 0xF6  MOD[0b11] MOD=3 REG[rrr] RM[nnn] refining_f3  W0 IMMUNE66()
+OPERANDS  : REG0=GPR32_R():rw:d  REG1=GPR32_B():r:d
+PATTERN   : 0x0F 0x38 0xF6  MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() refining_f3 W0 IMMUNE66()
+OPERANDS  : REG0=GPR32_R():rw:d MEM0:r:d
+
+# 64b
+PATTERN   : 0x0F 0x38 0xF6 MOD[0b11] MOD=3 REG[rrr] RM[nnn] refining_f3 W1 IMMUNE66()
+OPERANDS  : REG0=GPR64_R():rw:q  REG1=GPR64_B():r:q
+PATTERN   : 0x0F 0x38 0xF6 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM() refining_f3 W1   IMMUNE66()
+OPERANDS  : REG0=GPR64_R():rw:q  MEM0:r:q
+}
+
+
+
+###FILE: ./datafiles/bdw/rdseed.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+INSTRUCTIONS()::
+
+{
+ICLASS    : RDSEED
+CPL       : 3
+CATEGORY  : RDSEED
+EXTENSION : RDSEED
+ISA_SET   : RDSEED
+FLAGS     : MUST [ cf-mod zf-0 of-0 af-0 pf-0 sf-0 ]
+PATTERN   : 0x0F 0xC7  MOD[0b11] MOD=3 REG[0b111] RM[nnn] not_refining
+OPERANDS  : REG0=GPRv_B():w
+}
+
+
+
+###FILE: ./datafiles/bdw/smap.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+INSTRUCTIONS()::
+
+{
+ICLASS    : CLAC
+CPL       : 0
+CATEGORY  : SMAP
+EXTENSION : SMAP
+FLAGS     : MUST [ ac-0 ]
+# 0F 01 CA = 1100_1010 = 11_001_010
+PATTERN   : 0x0F 0x01  MOD[0b11] MOD=3 REG[0b001] RM[0b010] no_refining_prefix
+OPERANDS  :
+}
+
+{
+ICLASS    : STAC
+CPL       : 0
+CATEGORY  : SMAP
+EXTENSION : SMAP
+FLAGS     : MUST [ ac-1 ]
+# 0F 01 CB = 1100_1011 = 11_001_011
+PATTERN   : 0x0F 0x01  MOD[0b11] MOD=3 REG[0b001] RM[0b011] no_refining_prefix
+OPERANDS  :
+}
+
+
+
+###FILE: ./datafiles/sgx/sgx-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+INSTRUCTIONS()::
+
+# Both read EAX
+# Both may read, write, or read/write RBX, RCX, RDX
+# ENCLU 0f 01 D7
+# D7 =  1101 0111 = 11_010_111
+
+# ENCLS 0f 01 CF
+# CF = 1100_1111 = 11_001_111
+
+
+
+{
+ICLASS: ENCLU
+CPL: 3
+CATEGORY:  SGX
+EXTENSION: SGX
+ISA_SET:   SGX
+COMMENT:   May set flags
+PATTERN: 0x0F 0x01 MOD[0b11] MOD=3 REG[0b010] RM[0b111] no_refining_prefix
+OPERANDS: REG0=XED_REG_EAX:r:SUPP    \
+          REG1=XED_REG_RBX:crw:SUPP  \
+          REG2=XED_REG_RCX:crw:SUPP  \
+          REG3=XED_REG_RDX:crw:SUPP
+}
+
+{
+
+ICLASS: ENCLS
+CPL: 0
+CATEGORY:  SGX
+EXTENSION: SGX
+ISA_SET:   SGX
+COMMENT:   May set flags
+PATTERN: 0x0F 0x01 MOD[0b11] MOD=3 REG[0b001] RM[0b111] no_refining_prefix
+OPERANDS: REG0=XED_REG_EAX:r:SUPP    \
+          REG1=XED_REG_RBX:crw:SUPP  \
+          REG2=XED_REG_RCX:crw:SUPP  \
+          REG3=XED_REG_RDX:crw:SUPP
+
+}
+
+
+###FILE: ./datafiles/clflushopt/clflushopt.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+INSTRUCTIONS()::
+
+{
+ICLASS: CLFLUSHOPT
+CPL: 3
+CATEGORY:  CLFLUSHOPT
+EXTENSION: CLFLUSHOPT
+ISA_SET:   CLFLUSHOPT
+ATTRIBUTES: PREFETCH  # check TSX-friendliness
+PATTERN   : 0x0F 0xAE  MOD[mm] MOD!=3 REG[0b111] RM[nnn]  osz_refining_prefix REFINING66() MODRM()
+OPERANDS  : MEM0:r:mprefetch
+}
+
+
+
+
+###FILE: ./datafiles/pku/pku-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+
+INSTRUCTIONS()::
+
+{
+ICLASS:      RDPKRU
+CPL:         3
+CATEGORY:    PKU
+EXTENSION:   PKU
+ISA_SET:     PKU
+ATTRIBUTES:
+PATTERN:    0x0F 0x01 MOD[0b11] MOD=3 REG[0b101] RM[0b110]  no_refining_prefix
+OPERANDS:    REG0=XED_REG_EDX:w:SUPP REG1=XED_REG_EAX:w:SUPP REG2=XED_REG_ECX:r:SUPP
+}
+
+
+{
+ICLASS:      WRPKRU
+CPL:         3
+CATEGORY:    PKU
+EXTENSION:   PKU
+ISA_SET:     PKU
+ATTRIBUTES:
+PATTERN:    0x0F 0x01 MOD[0b11] MOD=3 REG[0b101] RM[0b111]  no_refining_prefix
+OPERANDS:    REG0=XED_REG_EDX:r:SUPP REG1=XED_REG_EAX:r:SUPP REG2=XED_REG_ECX:r:SUPP
+}
+
+
+
+###FILE: ./datafiles/clwb/clwb.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+INSTRUCTIONS()::
+
+{
+ICLASS: CLWB
+CPL: 3
+CATEGORY:  CLWB
+EXTENSION: CLWB
+ISA_SET:   CLWB
+ATTRIBUTES: PREFETCH  # check TSX-friendliness
+PATTERN   : 0x0F 0xAE  MOD[mm] MOD!=3 REG[0b110] RM[nnn]  osz_refining_prefix REFINING66() MODRM()
+OPERANDS  : MEM0:r:mprefetch
+}
+
+
+
+
+###FILE: ./datafiles/knl/knl-fixup.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+
+INSTRUCTIONS()::
+UDELETE     : PREFETCH_RESERVED_0F0Dr2
+
+
+###FILE: ./datafiles/knl/knl-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+#
+#
+#
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#
+#
+#
+EVEX_INSTRUCTIONS()::
+# EMITTING VEXP2PD (VEXP2PD-512-1)
+{
+ICLASS:      VEXP2PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xC8 V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W1  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf64
+IFORM:       VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER
+}
+
+{
+ICLASS:      VEXP2PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xC8 V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() SAE()  W1  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf64:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf64
+IFORM:       VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER
+}
+
+{
+ICLASS:      VEXP2PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0xC8 V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W1  NOEVSR  ESIZE_64_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:f64:TXT=BCASTSTR
+IFORM:       VEXP2PD_ZMMf64_MASKmskw_MEMf64_AVX512ER
+}
+
+
+# EMITTING VEXP2PS (VEXP2PS-512-1)
+{
+ICLASS:      VEXP2PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xC8 V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W0  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf32
+IFORM:       VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER
+}
+
+{
+ICLASS:      VEXP2PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xC8 V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() SAE()  W0  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf32:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf32
+IFORM:       VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER
+}
+
+{
+ICLASS:      VEXP2PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0xC8 V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W0  NOEVSR  ESIZE_32_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:f32:TXT=BCASTSTR
+IFORM:       VEXP2PS_ZMMf32_MASKmskw_MEMf32_AVX512ER
+}
+
+
+# EMITTING VGATHERPF0DPD (VGATHERPF0DPD-512-1)
+{
+ICLASS:      VGATHERPF0DPD
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b001] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_YMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF0DPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VGATHERPF0DPS (VGATHERPF0DPS-512-1)
+{
+ICLASS:      VGATHERPF0DPS
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b001] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VGATHERPF0QPD (VGATHERPF0QPD-512-1)
+{
+ICLASS:      VGATHERPF0QPD
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b001] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF0QPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VGATHERPF0QPS (VGATHERPF0QPS-512-1)
+{
+ICLASS:      VGATHERPF0QPS
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b001] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF0QPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VGATHERPF1DPD (VGATHERPF1DPD-512-1)
+{
+ICLASS:      VGATHERPF1DPD
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b010] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_YMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF1DPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VGATHERPF1DPS (VGATHERPF1DPS-512-1)
+{
+ICLASS:      VGATHERPF1DPS
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b010] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VGATHERPF1QPD (VGATHERPF1QPD-512-1)
+{
+ICLASS:      VGATHERPF1QPD
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b010] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF1QPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VGATHERPF1QPS (VGATHERPF1QPS-512-1)
+{
+ICLASS:      VGATHERPF1QPS
+CPL:         3
+CATEGORY:    GATHER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES GATHER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b010] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VGATHERPF1QPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VRCP28PD (VRCP28PD-512-1)
+{
+ICLASS:      VRCP28PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCA V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W1  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf64
+IFORM:       VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRCP28PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCA V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() SAE()  W1  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf64:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf64
+IFORM:       VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRCP28PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0xCA V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W1  NOEVSR  ESIZE_64_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:f64:TXT=BCASTSTR
+IFORM:       VRCP28PD_ZMMf64_MASKmskw_MEMf64_AVX512ER
+}
+
+
+# EMITTING VRCP28PS (VRCP28PS-512-1)
+{
+ICLASS:      VRCP28PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCA V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W0  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf32
+IFORM:       VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRCP28PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCA V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() SAE()  W0  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf32:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf32
+IFORM:       VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRCP28PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0xCA V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W0  NOEVSR  ESIZE_32_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:f32:TXT=BCASTSTR
+IFORM:       VRCP28PS_ZMMf32_MASKmskw_MEMf32_AVX512ER
+}
+
+
+# EMITTING VRCP28SD (VRCP28SD-128-1)
+{
+ICLASS:      VRCP28SD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCB V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  W1
+OPERANDS:    REG0=XMM_R3():w:dq:f64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 REG3=XMM_B3():r:dq:f64
+IFORM:       VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRCP28SD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCB V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN128() SAE()  W1
+OPERANDS:    REG0=XMM_R3():w:dq:f64:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 REG3=XMM_B3():r:dq:f64
+IFORM:       VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRCP28SD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_SCALAR
+PATTERN:    EVV 0xCB V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W1    ESIZE_64_BITS() NELEM_SCALAR()
+OPERANDS:    REG0=XMM_R3():w:dq:f64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 MEM0:r:q:f64
+IFORM:       VRCP28SD_XMMf64_MASKmskw_XMMf64_MEMf64_AVX512ER
+}
+
+
+# EMITTING VRCP28SS (VRCP28SS-128-1)
+{
+ICLASS:      VRCP28SS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCB V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  W0
+OPERANDS:    REG0=XMM_R3():w:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 REG3=XMM_B3():r:dq:f32
+IFORM:       VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRCP28SS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCB V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN128() SAE()  W0
+OPERANDS:    REG0=XMM_R3():w:dq:f32:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 REG3=XMM_B3():r:dq:f32
+IFORM:       VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRCP28SS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_SCALAR
+PATTERN:    EVV 0xCB V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W0    ESIZE_32_BITS() NELEM_SCALAR()
+OPERANDS:    REG0=XMM_R3():w:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 MEM0:r:d:f32
+IFORM:       VRCP28SS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512ER
+}
+
+
+# EMITTING VRSQRT28PD (VRSQRT28PD-512-1)
+{
+ICLASS:      VRSQRT28PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCC V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W1  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf64
+IFORM:       VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCC V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() SAE()  W1  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf64:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf64
+IFORM:       VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28PD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0xCC V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W1  NOEVSR  ESIZE_64_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:f64:TXT=BCASTSTR
+IFORM:       VRSQRT28PD_ZMMf64_MASKmskw_MEMf64_AVX512ER
+}
+
+
+# EMITTING VRSQRT28PS (VRSQRT28PS-512-1)
+{
+ICLASS:      VRSQRT28PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCC V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W0  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf32
+IFORM:       VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xCC V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() SAE()  W0  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zf32:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zf32
+IFORM:       VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28PS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0xCC V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W0  NOEVSR  ESIZE_32_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:f32:TXT=BCASTSTR
+IFORM:       VRSQRT28PS_ZMMf32_MASKmskw_MEMf32_AVX512ER
+}
+
+
+# EMITTING VRSQRT28SD (VRSQRT28SD-128-1)
+{
+ICLASS:      VRSQRT28SD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCD V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  W1
+OPERANDS:    REG0=XMM_R3():w:dq:f64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 REG3=XMM_B3():r:dq:f64
+IFORM:       VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28SD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCD V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN128() SAE()  W1
+OPERANDS:    REG0=XMM_R3():w:dq:f64:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 REG3=XMM_B3():r:dq:f64
+IFORM:       VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28SD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_SCALAR
+PATTERN:    EVV 0xCD V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W1    ESIZE_64_BITS() NELEM_SCALAR()
+OPERANDS:    REG0=XMM_R3():w:dq:f64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 MEM0:r:q:f64
+IFORM:       VRSQRT28SD_XMMf64_MASKmskw_XMMf64_MEMf64_AVX512ER
+}
+
+
+# EMITTING VRSQRT28SS (VRSQRT28SS-128-1)
+{
+ICLASS:      VRSQRT28SS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCD V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  W0
+OPERANDS:    REG0=XMM_R3():w:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 REG3=XMM_B3():r:dq:f32
+IFORM:       VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28SS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MASKOP_EVEX
+PATTERN:    EVV 0xCD V66 V0F38 MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN128() SAE()  W0
+OPERANDS:    REG0=XMM_R3():w:dq:f32:TXT=SAESTR REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 REG3=XMM_B3():r:dq:f32
+IFORM:       VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER
+}
+
+{
+ICLASS:      VRSQRT28SS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512ER_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MXCSR SIMD_SCALAR MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_SCALAR
+PATTERN:    EVV 0xCD V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W0    ESIZE_32_BITS() NELEM_SCALAR()
+OPERANDS:    REG0=XMM_R3():w:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 MEM0:r:d:f32
+IFORM:       VRSQRT28SS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512ER
+}
+
+
+# EMITTING VSCATTERPF0DPD (VSCATTERPF0DPD-512-1)
+{
+ICLASS:      VSCATTERPF0DPD
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b101] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_YMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF0DPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VSCATTERPF0DPS (VSCATTERPF0DPS-512-1)
+{
+ICLASS:      VSCATTERPF0DPS
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b101] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VSCATTERPF0QPD (VSCATTERPF0QPD-512-1)
+{
+ICLASS:      VSCATTERPF0QPD
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b101] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF0QPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VSCATTERPF0QPS (VSCATTERPF0QPS-512-1)
+{
+ICLASS:      VSCATTERPF0QPS
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b101] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF0QPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VSCATTERPF1DPD (VSCATTERPF1DPD-512-1)
+{
+ICLASS:      VSCATTERPF1DPD
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b110] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_YMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF1DPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VSCATTERPF1DPS (VSCATTERPF1DPS-512-1)
+{
+ICLASS:      VSCATTERPF1DPS
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED DWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC6 V66 V0F38 MOD[mm] MOD!=3 REG[0b110] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VSCATTERPF1QPD (VSCATTERPF1QPD-512-1)
+{
+ICLASS:      VSCATTERPF1QPD
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b110] RM[nnn] BCRC=0   VL512  W1 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_64_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f64 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF1QPD_MEMf64_MASKmskw_AVX512PF_VL512
+}
+
+
+# EMITTING VSCATTERPF1QPS (VSCATTERPF1QPS-512-1)
+{
+ICLASS:      VSCATTERPF1QPS
+CPL:         3
+CATEGORY:    SCATTER
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512PF_512
+EXCEPTIONS:     AVX512-E12NP
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION SPECIAL_AGEN_REQUIRED QWORD_INDICES SCATTER PREFETCH MASKOP_EVEX DISP8_GSCAT
+PATTERN:    EVV 0xC7 V66 V0F38 MOD[mm] MOD!=3 REG[0b110] RM[nnn] BCRC=0   VL512  W0 RM=4 UISA_VMODRM_ZMM() eanot16  NOVSR  ZEROING=0  ESIZE_32_BITS() NELEM_GSCAT()
+OPERANDS:    MEM0:r:b:f32 REG0=MASKNOT0():rw:mskw
+IFORM:       VSCATTERPF1QPS_MEMf32_MASKmskw_AVX512PF_VL512
+}
+
+
+INSTRUCTIONS()::
+# EMITTING PREFETCHWT1 (PREFETCHWT1-N/A-1)
+{
+ICLASS:      PREFETCHWT1
+CPL:         3
+CATEGORY:    PREFETCHWT1
+EXTENSION:   PREFETCHWT1
+ISA_SET:     PREFETCHWT1
+REAL_OPCODE: Y
+ATTRIBUTES:  PREFETCH
+PATTERN:     0x0F 0x0D MOD[mm] MOD!=3 REG[0b010] RM[nnn]  MODRM()
+OPERANDS:    MEM0:r:b:u8
+IFORM:       PREFETCHWT1_MEMu8
+}
+
+
+
+
+###FILE: ./datafiles/4fmaps-512/4fmaps-512-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+#
+#
+#
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#
+#
+#
+EVEX_INSTRUCTIONS()::
+# EMITTING V4FMADDPS (V4FMADDPS-512-1)
+{
+ICLASS:      V4FMADDPS
+CPL:         3
+CATEGORY:    AVX512_4FMAPS
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_4FMAPS_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MULTISOURCE4 DISP8_TUPLE1_4X MXCSR MASKOP_EVEX
+PATTERN:    EVV 0x9A VF2 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  VL512  W0    ESIZE_32_BITS() NELEM_TUPLE1_4X()
+OPERANDS:    REG0=ZMM_R3():rw:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf32:MULTISOURCE4 MEM0:r:dq:f32
+IFORM:       V4FMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512
+}
+
+
+# EMITTING V4FMADDSS (V4FMADDSS-128-1)
+{
+ICLASS:      V4FMADDSS
+CPL:         3
+CATEGORY:    AVX512_4FMAPS
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_4FMAPS_SCALAR
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  DISP8_TUPLE1_4X MXCSR MULTISOURCE4 MEMORY_FAULT_SUPPRESSION MASKOP_EVEX SIMD_SCALAR
+PATTERN:    EVV 0x9B VF2 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W0    ESIZE_32_BITS() NELEM_TUPLE1_4X()
+OPERANDS:    REG0=XMM_R3():rw:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32:MULTISOURCE4 MEM0:r:dq:f32
+IFORM:       V4FMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512
+}
+
+
+# EMITTING V4FNMADDPS (V4FNMADDPS-512-1)
+{
+ICLASS:      V4FNMADDPS
+CPL:         3
+CATEGORY:    AVX512_4FMAPS
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_4FMAPS_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MULTISOURCE4 DISP8_TUPLE1_4X MXCSR MASKOP_EVEX
+PATTERN:    EVV 0xAA VF2 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  VL512  W0    ESIZE_32_BITS() NELEM_TUPLE1_4X()
+OPERANDS:    REG0=ZMM_R3():rw:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf32:MULTISOURCE4 MEM0:r:dq:f32
+IFORM:       V4FNMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512
+}
+
+
+# EMITTING V4FNMADDSS (V4FNMADDSS-128-1)
+{
+ICLASS:      V4FNMADDSS
+CPL:         3
+CATEGORY:    AVX512_4FMAPS
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_4FMAPS_SCALAR
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  DISP8_TUPLE1_4X MXCSR MULTISOURCE4 MEMORY_FAULT_SUPPRESSION MASKOP_EVEX SIMD_SCALAR
+PATTERN:    EVV 0xAB VF2 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W0    ESIZE_32_BITS() NELEM_TUPLE1_4X()
+OPERANDS:    REG0=XMM_R3():rw:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32:MULTISOURCE4 MEM0:r:dq:f32
+IFORM:       V4FNMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512
+}
+
+
+
+
+###FILE: ./datafiles/4vnniw-512/4vnniw-512-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+#
+#
+#
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#
+#
+#
+EVEX_INSTRUCTIONS()::
+# EMITTING VP4DPWSSD (VP4DPWSSD-512-1)
+{
+ICLASS:      VP4DPWSSD
+CPL:         3
+CATEGORY:    AVX512_4VNNIW
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_4VNNIW_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MULTISOURCE4 DISP8_TUPLE1_4X MASKOP_EVEX
+PATTERN:    EVV 0x52 VF2 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() BCRC=0  VL512  W0    ESIZE_32_BITS() NELEM_TUPLE1_4X()
+OPERANDS:    REG0=ZMM_R3():rw:zi32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zi16:MULTISOURCE4 MEM0:r:dq:u32
+IFORM:       VP4DPWSSD_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512
+}
+
+
+# EMITTING VP4DPWSSDS (VP4DPWSSDS-512-1)
+{
+ICLASS:      VP4DPWSSDS
+CPL:         3
+CATEGORY:    AVX512_4VNNIW
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_4VNNIW_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MULTISOURCE4 DISP8_TUPLE1_4X MASKOP_EVEX
+PATTERN:    EVV 0x53 VF2 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] MODRM() BCRC=0  VL512  W0    ESIZE_32_BITS() NELEM_TUPLE1_4X()
+OPERANDS:    REG0=ZMM_R3():rw:zi32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zi16:MULTISOURCE4 MEM0:r:dq:u32
+IFORM:       VP4DPWSSDS_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512
+}
+
+
+
+
+###FILE: ./datafiles/vpopcntdq-512/vpopcntdq-512-isa.xed.txt
+
+#BEGIN_LEGAL
+#
+#Copyright (c) 2016 Intel Corporation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#END_LEGAL
+#
+#
+#
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#
+#
+#
+EVEX_INSTRUCTIONS()::
+# EMITTING VPOPCNTD (VPOPCNTD-512-1)
+{
+ICLASS:      VPOPCNTD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_VPOPCNTDQ_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX
+PATTERN:    EVV 0x55 V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W0  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zu32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zu32
+IFORM:       VPOPCNTD_ZMMu32_MASKmskw_ZMMu32_AVX512
+}
+
+{
+ICLASS:      VPOPCNTD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_VPOPCNTDQ_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0x55 V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W0  NOEVSR  ESIZE_32_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zu32 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:u32:TXT=BCASTSTR
+IFORM:       VPOPCNTD_ZMMu32_MASKmskw_MEMu32_AVX512
+}
+
+
+# EMITTING VPOPCNTQ (VPOPCNTQ-512-1)
+{
+ICLASS:      VPOPCNTQ
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_VPOPCNTDQ_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX
+PATTERN:    EVV 0x55 V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W1  NOEVSR
+OPERANDS:    REG0=ZMM_R3():w:zu64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_B3():r:zu64
+IFORM:       VPOPCNTQ_ZMMu64_MASKmskw_ZMMu64_AVX512
+}
+
+{
+ICLASS:      VPOPCNTQ
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512_VPOPCNTDQ_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0x55 V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W1  NOEVSR  ESIZE_64_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zu64 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:vv:u64:TXT=BCASTSTR
+IFORM:       VPOPCNTQ_ZMMu64_MASKmskw_MEMu64_AVX512
+}
+
+
+
+
+###FILE: ./datafiles/avx512f/avx512-foundation-isa.xed.txt
+
+#BEGIN_LEGAL
+#INTEL CONFIDENTIAL
+#
+#Copyright (c) 2017, Intel Corporation. All rights reserved.
+#
+#The source code contained or described herein and all documents
+#related to the source code ("Material") are owned by Intel Corporation
+#or its suppliers or licensors. Title to the Material remains with
+#Intel Corporation or its suppliers and licensors. The Material
+#contains trade secrets and proprietary and confidential information of
+#Intel or its suppliers and licensors. The Material is protected by
+#worldwide copyright and trade secret laws and treaty provisions. No
+#part of the Material may be used, copied, reproduced, modified,
+#published, uploaded, posted, transmitted, distributed, or disclosed in
+#any way without Intel's prior express written permission.
+#
+#No license under any patent, copyright, trade secret or other
+#intellectual property right is granted to or conferred upon you by
+#disclosure or delivery of the Materials, either expressly, by
+#implication, inducement, estoppel or otherwise. Any license under such
+#intellectual property rights must be express and approved by Intel in
+#writing.
+#END_LEGAL
+#
+#
+#
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#    ***** GENERATED FILE -- DO NOT EDIT! *****
+#
+#
+#
+EVEX_INSTRUCTIONS()::
+# EMITTING VADDPD (VADDPD-512-1)
+{
+ICLASS:      VADDPD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR
+PATTERN:    EVV 0x58 V66 V0F MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W1
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf64 REG3=ZMM_B3():r:zf64
+IFORM:       VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512
+}
+
+{
+ICLASS:      VADDPD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR
+PATTERN:    EVV 0x58 V66 V0F MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() AVX512_ROUND()  W1
+OPERANDS:    REG0=ZMM_R3():w:zf64:TXT=ROUNDC REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf64 REG3=ZMM_B3():r:zf64
+IFORM:       VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512
+}
+
+{
+ICLASS:      VADDPD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL MXCSR BROADCAST_ENABLED
+PATTERN:    EVV 0x58 V66 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W1    ESIZE_64_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf64 MEM0:r:vv:f64:TXT=BCASTSTR
+IFORM:       VADDPD_ZMMf64_MASKmskw_ZMMf64_MEMf64_AVX512
+}
+
+
+# EMITTING VADDPS (VADDPS-512-1)
+{
+ICLASS:      VADDPS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR
+PATTERN:    EVV 0x58 VNP V0F MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W0
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf32 REG3=ZMM_B3():r:zf32
+IFORM:       VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512
+}
+
+{
+ICLASS:      VADDPS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR
+PATTERN:    EVV 0x58 VNP V0F MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN512() AVX512_ROUND()  W0
+OPERANDS:    REG0=ZMM_R3():w:zf32:TXT=ROUNDC REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf32 REG3=ZMM_B3():r:zf32
+IFORM:       VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512
+}
+
+{
+ICLASS:      VADDPS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E2
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL MXCSR BROADCAST_ENABLED
+PATTERN:    EVV 0x58 VNP V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W0    ESIZE_32_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf32 MEM0:r:vv:f32:TXT=BCASTSTR
+IFORM:       VADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512
+}
+
+
+# EMITTING VADDSD (VADDSD-128-1)
+{
+ICLASS:      VADDSD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR SIMD_SCALAR
+PATTERN:    EVV 0x58 VF2 V0F MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  W1
+OPERANDS:    REG0=XMM_R3():w:dq:f64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 REG3=XMM_B3():r:dq:f64
+IFORM:       VADDSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512
+}
+
+{
+ICLASS:      VADDSD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR SIMD_SCALAR
+PATTERN:    EVV 0x58 VF2 V0F MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN128() AVX512_ROUND()  W1
+OPERANDS:    REG0=XMM_R3():w:dq:f64:TXT=ROUNDC REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 REG3=XMM_B3():r:dq:f64
+IFORM:       VADDSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512
+}
+
+{
+ICLASS:      VADDSD
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX MXCSR SIMD_SCALAR DISP8_SCALAR
+PATTERN:    EVV 0x58 VF2 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W1    ESIZE_64_BITS() NELEM_SCALAR()
+OPERANDS:    REG0=XMM_R3():w:dq:f64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f64 MEM0:r:q:f64
+IFORM:       VADDSD_XMMf64_MASKmskw_XMMf64_MEMf64_AVX512
+}
+
+
+# EMITTING VADDSS (VADDSS-128-1)
+{
+ICLASS:      VADDSS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR SIMD_SCALAR
+PATTERN:    EVV 0x58 VF3 V0F MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  W0
+OPERANDS:    REG0=XMM_R3():w:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 REG3=XMM_B3():r:dq:f32
+IFORM:       VADDSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512
+}
+
+{
+ICLASS:      VADDSS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MXCSR SIMD_SCALAR
+PATTERN:    EVV 0x58 VF3 V0F MOD[0b11] MOD=3 BCRC=1 REG[rrr] RM[nnn] FIX_ROUND_LEN128() AVX512_ROUND()  W0
+OPERANDS:    REG0=XMM_R3():w:dq:f32:TXT=ROUNDC REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 REG3=XMM_B3():r:dq:f32
+IFORM:       VADDSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512
+}
+
+{
+ICLASS:      VADDSS
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_SCALAR
+EXCEPTIONS:     AVX512-E3
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX MXCSR SIMD_SCALAR DISP8_SCALAR
+PATTERN:    EVV 0x58 VF3 V0F MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  W0    ESIZE_32_BITS() NELEM_SCALAR()
+OPERANDS:    REG0=XMM_R3():w:dq:f32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=XMM_N3():r:dq:f32 MEM0:r:d:f32
+IFORM:       VADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512
+}
+
+
+# EMITTING VALIGND (VALIGND-512-1)
+{
+ICLASS:      VALIGND
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4NF
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX
+PATTERN:    EVV 0x03 V66 V0F3A MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W0   UIMM8()
+OPERANDS:    REG0=ZMM_R3():w:zu32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zu32 REG3=ZMM_B3():r:zu32 IMM0:r:b
+IFORM:       VALIGND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512
+}
+
+{
+ICLASS:      VALIGND
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4NF
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0x03 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W0   UIMM8()  ESIZE_32_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zu32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zu32 MEM0:r:vv:u32:TXT=BCASTSTR IMM0:r:b
+IFORM:       VALIGND_ZMMu32_MASKmskw_ZMMu32_MEMu32_IMM8_AVX512
+}
+
+
+# EMITTING VALIGNQ (VALIGNQ-512-1)
+{
+ICLASS:      VALIGNQ
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4NF
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX
+PATTERN:    EVV 0x03 V66 V0F3A MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W1   UIMM8()
+OPERANDS:    REG0=ZMM_R3():w:zu64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zu64 REG3=ZMM_B3():r:zu64 IMM0:r:b
+IFORM:       VALIGNQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512
+}
+
+{
+ICLASS:      VALIGNQ
+CPL:         3
+CATEGORY:    AVX512
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4NF
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED
+PATTERN:    EVV 0x03 V66 V0F3A MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W1   UIMM8()  ESIZE_64_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zu64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zu64 MEM0:r:vv:u64:TXT=BCASTSTR IMM0:r:b
+IFORM:       VALIGNQ_ZMMu64_MASKmskw_ZMMu64_MEMu64_IMM8_AVX512
+}
+
+
+# EMITTING VBLENDMPD (VBLENDMPD-512-1)
+{
+ICLASS:      VBLENDMPD
+CPL:         3
+CATEGORY:    BLEND
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MASK_AS_CONTROL
+PATTERN:    EVV 0x65 V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W1
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf64 REG3=ZMM_B3():r:zf64
+IFORM:       VBLENDMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512
+}
+
+{
+ICLASS:      VBLENDMPD
+CPL:         3
+CATEGORY:    BLEND
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED MASK_AS_CONTROL
+PATTERN:    EVV 0x65 V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W1    ESIZE_64_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf64 MEM0:r:vv:f64:TXT=BCASTSTR
+IFORM:       VBLENDMPD_ZMMf64_MASKmskw_ZMMf64_MEMf64_AVX512
+}
+
+
+# EMITTING VBLENDMPS (VBLENDMPS-512-1)
+{
+ICLASS:      VBLENDMPS
+CPL:         3
+CATEGORY:    BLEND
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MASKOP_EVEX MASK_AS_CONTROL
+PATTERN:    EVV 0x65 V66 V0F38 MOD[0b11] MOD=3 BCRC=0 REG[rrr] RM[nnn]  VL512  W0
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf32 REG3=ZMM_B3():r:zf32
+IFORM:       VBLENDMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512
+}
+
+{
+ICLASS:      VBLENDMPS
+CPL:         3
+CATEGORY:    BLEND
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E4
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_FULL BROADCAST_ENABLED MASK_AS_CONTROL
+PATTERN:    EVV 0x65 V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn]  MODRM()  VL512  W0    ESIZE_32_BITS() NELEM_FULL()
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR REG2=ZMM_N3():r:zf32 MEM0:r:vv:f32:TXT=BCASTSTR
+IFORM:       VBLENDMPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512
+}
+
+
+# EMITTING VBROADCASTF32X4 (VBROADCASTF32X4-512-1)
+{
+ICLASS:      VBROADCASTF32X4
+CPL:         3
+CATEGORY:    BROADCAST
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E6
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_TUPLE4
+PATTERN:    EVV 0x1A V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  VL512  W0  NOEVSR  ESIZE_32_BITS() NELEM_TUPLE4()
+OPERANDS:    REG0=ZMM_R3():w:zf32 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:dq:f32 EMX_BROADCAST_4TO16_32
+IFORM:       VBROADCASTF32X4_ZMMf32_MASKmskw_MEMf32_AVX512
+}
+
+
+# EMITTING VBROADCASTF64X4 (VBROADCASTF64X4-512-1)
+{
+ICLASS:      VBROADCASTF64X4
+CPL:         3
+CATEGORY:    BROADCAST
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E6
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_TUPLE4
+PATTERN:    EVV 0x1B V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  VL512  W1  NOEVSR  ESIZE_64_BITS() NELEM_TUPLE4()
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:qq:f64 EMX_BROADCAST_4TO8_64
+IFORM:       VBROADCASTF64X4_ZMMf64_MASKmskw_MEMf64_AVX512
+}
+
+
+# EMITTING VBROADCASTI32X4 (VBROADCASTI32X4-512-1)
+{
+ICLASS:      VBROADCASTI32X4
+CPL:         3
+CATEGORY:    BROADCAST
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E6
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_TUPLE4
+PATTERN:    EVV 0x5A V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  VL512  W0  NOEVSR  ESIZE_32_BITS() NELEM_TUPLE4()
+OPERANDS:    REG0=ZMM_R3():w:zu32 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:dq:u32 EMX_BROADCAST_4TO16_32
+IFORM:       VBROADCASTI32X4_ZMMu32_MASKmskw_MEMu32_AVX512
+}
+
+
+# EMITTING VBROADCASTI64X4 (VBROADCASTI64X4-512-1)
+{
+ICLASS:      VBROADCASTI64X4
+CPL:         3
+CATEGORY:    BROADCAST
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E6
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_TUPLE4
+PATTERN:    EVV 0x5B V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  VL512  W1  NOEVSR  ESIZE_64_BITS() NELEM_TUPLE4()
+OPERANDS:    REG0=ZMM_R3():w:zu64 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:qq:u64 EMX_BROADCAST_4TO8_64
+IFORM:       VBROADCASTI64X4_ZMMu64_MASKmskw_MEMu64_AVX512
+}
+
+
+# EMITTING VBROADCASTSD (VBROADCASTSD-512-1)
+{
+ICLASS:      VBROADCASTSD
+CPL:         3
+CATEGORY:    BROADCAST
+EXTENSION:   AVX512EVEX
+ISA_SET:     AVX512F_512
+EXCEPTIONS:     AVX512-E6
+REAL_OPCODE: Y
+ATTRIBUTES:  MEMORY_FAULT_SUPPRESSION MASKOP_EVEX DISP8_TUPLE1
+PATTERN:    EVV 0x19 V66 V0F38 MOD[mm] MOD!=3 REG[rrr] RM[nnn] BCRC=0 MODRM()  VL512  W1  NOEVSR  ESIZE_64_BITS() NELEM_TUPLE1()
+OPERANDS:    REG0=ZMM_R3():w:zf64 REG1=MASK1():r:mskw:TXT=ZEROSTR MEM0:r:q:f6