diff -rup binutils.orig/gas/config/tc-s390.c binutils-2.27/gas/config/tc-s390.c
--- binutils.orig/gas/config/tc-s390.c 2017-03-24 14:50:52.004815587 +0000
+++ binutils-2.27/gas/config/tc-s390.c 2017-03-24 14:55:19.129377655 +0000
@@ -282,7 +282,8 @@ s390_parse_cpu (const char * arg
{ STRING_COMMA_LEN ("z10"), 0 },
{ STRING_COMMA_LEN ("z196"), 0 },
{ STRING_COMMA_LEN ("zEC12"), S390_INSTR_FLAG_HTM },
- { STRING_COMMA_LEN ("z13"), S390_INSTR_FLAG_HTM | S390_INSTR_FLAG_VX }
+ { STRING_COMMA_LEN ("z13"), S390_INSTR_FLAG_HTM | S390_INSTR_FLAG_VX },
+ { STRING_COMMA_LEN ("arch12"), S390_INSTR_FLAG_HTM | S390_INSTR_FLAG_VX | S390_INSTR_FLAG_VX2 }
};
static struct
{
@@ -294,7 +295,9 @@ s390_parse_cpu (const char * arg
{ "htm", S390_INSTR_FLAG_HTM, TRUE },
{ "nohtm", S390_INSTR_FLAG_HTM, FALSE },
{ "vx", S390_INSTR_FLAG_VX, TRUE },
- { "novx", S390_INSTR_FLAG_VX, FALSE }
+ { "novx", S390_INSTR_FLAG_VX, FALSE },
+ { "vx2", S390_INSTR_FLAG_VX2, TRUE },
+ { "novx2", S390_INSTR_FLAG_VX2, FALSE }
};
unsigned int icpu;
char *ilp_bak;
diff -rup binutils.orig/gas/doc/as.texinfo binutils-2.27/gas/doc/as.texinfo
--- binutils.orig/gas/doc/as.texinfo 2017-03-24 14:50:52.037815161 +0000
+++ binutils-2.27/gas/doc/as.texinfo 2017-03-24 14:56:29.325474523 +0000
@@ -1656,7 +1656,7 @@ Architecture (esa) or the z/Architecture
@item -march=@var{processor}
Specify which s390 processor variant is the target, @samp{g6}, @samp{g6},
@samp{z900}, @samp{z990}, @samp{z9-109}, @samp{z9-ec}, @samp{z10},
-@samp{z196}, @samp{zEC12}, or @samp{z13}.
+@samp{z196}, @samp{zEC12}, @samp{z13} (or @samp{arch11}), or @samp{arch12}.
@item -mregnames
@itemx -mno-regnames
Allow or disallow symbolic names for registers.
diff -rup binutils.orig/gas/doc/c-s390.texi binutils-2.27/gas/doc/c-s390.texi
--- binutils.orig/gas/doc/c-s390.texi 2017-03-24 14:50:52.053814956 +0000
+++ binutils-2.27/gas/doc/c-s390.texi 2017-03-24 14:57:53.010397844 +0000
@@ -14,9 +14,11 @@
@cindex s390 support
The s390 version of @code{@value{AS}} supports two architectures modes
-and seven chip levels. The architecture modes are the Enterprise System
+and eleven chip levels. The architecture modes are the Enterprise System
Architecture (ESA) and the newer z/Architecture mode. The chip levels
-are g5, g6, z900, z990, z9-109, z9-ec, z10, z196, zEC12, and z13.
+are g5 (or arch3), g6, z900 (or arch5), z990 (or arch6), z9-109, z9-ec
+(or arch7), z10 (or arch8), z196 (or arch9), zEC12 (or arch10), z13
+(or arch11), and arch12.
@menu
* s390 Options:: Command-line Options.
diff -rup binutils.orig/gas/testsuite/gas/s390/s390.exp binutils-2.27/gas/testsuite/gas/s390/s390.exp
--- binutils.orig/gas/testsuite/gas/s390/s390.exp 2017-03-24 14:50:54.051789225 +0000
+++ binutils-2.27/gas/testsuite/gas/s390/s390.exp 2017-03-24 14:54:05.065330554 +0000
@@ -28,6 +28,7 @@ if [expr [istarget "s390-*-*"] || [ista
run_dump_test "zarch-z196" "{as -m64} {as -march=z196}"
run_dump_test "zarch-zEC12" "{as -m64} {as -march=zEC12}"
run_dump_test "zarch-z13" "{as -m64} {as -march=z13}"
+ run_dump_test "zarch-arch12" "{as -m64} {as -march=arch12}"
run_dump_test "zarch-reloc" "{as -m64}"
run_dump_test "zarch-operands" "{as -m64} {as -march=z9-109}"
run_dump_test "zarch-machine" "{as -m64} {as -march=z900}"
diff --git a/gas/testsuite/gas/s390/zarch-arch12.d b/gas/testsuite/gas/s390/zarch-arch12.d
new file mode 100644
index 0000000..bc2ce18
--- /dev/null
+++ b/gas/testsuite/gas/s390/zarch-arch12.d
@@ -0,0 +1,200 @@
+#name: s390x opcode
+#objdump: -dr
+
+.*: +file format .*
+
+Disassembly of section .text:
+
+.* <foo>:
+.*: e7 f1 40 00 06 85 [ ]*vbperm %v15,%v17,%v20
+.*: e7 f6 9f a0 60 04 [ ]*vllezlf %v15,4000\(%r6,%r9\)
+.*: e7 f1 4d c0 87 b8 [ ]*vmsl %v15,%v17,%v20,%v24,13,12
+.*: e7 f1 43 d0 87 b8 [ ]*vmslg %v15,%v17,%v20,%v24,13
+.*: e7 f1 40 00 06 6c [ ]*vnx %v15,%v17,%v20
+.*: e7 f1 40 00 06 6e [ ]*vnn %v15,%v17,%v20
+.*: e7 f1 40 00 06 6f [ ]*voc %v15,%v17,%v20
+.*: e7 f1 00 00 04 50 [ ]*vpopctb %v15,%v17
+.*: e7 f1 00 00 14 50 [ ]*vpopcth %v15,%v17
+.*: e7 f1 00 00 24 50 [ ]*vpopctf %v15,%v17
+.*: e7 f1 00 00 34 50 [ ]*vpopctg %v15,%v17
+.*: e7 f1 40 00 26 e3 [ ]*vfasb %v15,%v17,%v20
+.*: e7 f1 40 08 26 e3 [ ]*wfasb %v15,%v17,%v20
+.*: e7 f1 40 08 46 e3 [ ]*wfaxb %v15,%v17,%v20
+.*: e7 f1 00 00 24 cb [ ]*wfcsb %v15,%v17
+.*: e7 f1 00 00 44 cb [ ]*wfcxb %v15,%v17
+.*: e7 f1 00 00 24 ca [ ]*wfksb %v15,%v17
+.*: e7 f1 00 00 44 ca [ ]*wfkxb %v15,%v17
+.*: e7 f1 40 00 26 e8 [ ]*vfcesb %v15,%v17,%v20
+.*: e7 f1 40 10 26 e8 [ ]*vfcesbs %v15,%v17,%v20
+.*: e7 f1 40 08 26 e8 [ ]*wfcesb %v15,%v17,%v20
+.*: e7 f1 40 18 26 e8 [ ]*wfcesbs %v15,%v17,%v20
+.*: e7 f1 40 08 46 e8 [ ]*wfcexb %v15,%v17,%v20
+.*: e7 f1 40 18 46 e8 [ ]*wfcexbs %v15,%v17,%v20
+.*: e7 f1 40 04 26 e8 [ ]*vfkesb %v15,%v17,%v20
+.*: e7 f1 40 14 26 e8 [ ]*vfkesbs %v15,%v17,%v20
+.*: e7 f1 40 0c 26 e8 [ ]*wfkesb %v15,%v17,%v20
+.*: e7 f1 40 1c 26 e8 [ ]*wfkesbs %v15,%v17,%v20
+.*: e7 f1 40 04 36 e8 [ ]*vfkedb %v15,%v17,%v20
+.*: e7 f1 40 14 36 e8 [ ]*vfkedbs %v15,%v17,%v20
+.*: e7 f1 40 0c 36 e8 [ ]*wfkedb %v15,%v17,%v20
+.*: e7 f1 40 1c 36 e8 [ ]*wfkedbs %v15,%v17,%v20
+.*: e7 f1 40 0c 46 e8 [ ]*wfkexb %v15,%v17,%v20
+.*: e7 f1 40 1c 46 e8 [ ]*wfkexbs %v15,%v17,%v20
+.*: e7 f1 40 00 26 eb [ ]*vfchsb %v15,%v17,%v20
+.*: e7 f1 40 10 26 eb [ ]*vfchsbs %v15,%v17,%v20
+.*: e7 f1 40 08 26 eb [ ]*wfchsb %v15,%v17,%v20
+.*: e7 f1 40 18 26 eb [ ]*wfchsbs %v15,%v17,%v20
+.*: e7 f1 40 08 46 eb [ ]*wfchxb %v15,%v17,%v20
+.*: e7 f1 40 18 46 eb [ ]*wfchxbs %v15,%v17,%v20
+.*: e7 f1 40 04 26 eb [ ]*vfkhsb %v15,%v17,%v20
+.*: e7 f1 40 14 26 eb [ ]*vfkhsbs %v15,%v17,%v20
+.*: e7 f1 40 0c 26 eb [ ]*wfkhsb %v15,%v17,%v20
+.*: e7 f1 40 1c 26 eb [ ]*wfkhsbs %v15,%v17,%v20
+.*: e7 f1 40 04 36 eb [ ]*vfkhdb %v15,%v17,%v20
+.*: e7 f1 40 14 36 eb [ ]*vfkhdbs %v15,%v17,%v20
+.*: e7 f1 40 0c 36 eb [ ]*wfkhdb %v15,%v17,%v20
+.*: e7 f1 40 1c 36 eb [ ]*wfkhdbs %v15,%v17,%v20
+.*: e7 f1 40 0c 46 eb [ ]*wfkhxb %v15,%v17,%v20
+.*: e7 f1 40 1c 46 eb [ ]*wfkhxbs %v15,%v17,%v20
+.*: e7 f1 40 00 26 ea [ ]*vfchesb %v15,%v17,%v20
+.*: e7 f1 40 10 26 ea [ ]*vfchesbs %v15,%v17,%v20
+.*: e7 f1 40 08 26 ea [ ]*wfchesb %v15,%v17,%v20
+.*: e7 f1 40 18 26 ea [ ]*wfchesbs %v15,%v17,%v20
+.*: e7 f1 40 08 46 ea [ ]*wfchexb %v15,%v17,%v20
+.*: e7 f1 40 18 46 ea [ ]*wfchexbs %v15,%v17,%v20
+.*: e7 f1 40 04 26 ea [ ]*vfkhesb %v15,%v17,%v20
+.*: e7 f1 40 14 26 ea [ ]*vfkhesbs %v15,%v17,%v20
+.*: e7 f1 40 0c 26 ea [ ]*wfkhesb %v15,%v17,%v20
+.*: e7 f1 40 1c 26 ea [ ]*wfkhesbs %v15,%v17,%v20
+.*: e7 f1 40 04 36 ea [ ]*vfkhedb %v15,%v17,%v20
+.*: e7 f1 40 14 36 ea [ ]*vfkhedbs %v15,%v17,%v20
+.*: e7 f1 40 0c 36 ea [ ]*wfkhedb %v15,%v17,%v20
+.*: e7 f1 40 1c 36 ea [ ]*wfkhedbs %v15,%v17,%v20
+.*: e7 f1 40 0c 46 ea [ ]*wfkhexb %v15,%v17,%v20
+.*: e7 f1 40 1c 46 ea [ ]*wfkhexbs %v15,%v17,%v20
+.*: e7 f1 40 00 26 e5 [ ]*vfdsb %v15,%v17,%v20
+.*: e7 f1 40 08 26 e5 [ ]*wfdsb %v15,%v17,%v20
+.*: e7 f1 40 08 46 e5 [ ]*wfdxb %v15,%v17,%v20
+.*: e7 f1 00 cd 24 c7 [ ]*wfisb %v15,%v17,5,12
+.*: e7 f1 00 cd 24 c7 [ ]*wfisb %v15,%v17,5,12
+.*: e7 f1 00 cd 44 c7 [ ]*wfixb %v15,%v17,5,12
+.*: e7 f1 00 0c d4 c4 [ ]*vfll %v15,%v17,13,12
+.*: e7 f1 00 00 24 c4 [ ]*vflls %v15,%v17
+.*: e7 f1 00 08 24 c4 [ ]*wflls %v15,%v17
+.*: e7 f1 00 08 34 c4 [ ]*wflld %v15,%v17
+.*: e7 f1 00 bc d4 c5 [ ]*vflr %v15,%v17,13,12,11
+.*: e7 f1 00 cd 34 c5 [ ]*wflrd %v15,%v17,5,12
+.*: e7 f1 00 cd 34 c5 [ ]*wflrd %v15,%v17,5,12
+.*: e7 f1 00 cd 44 c5 [ ]*wflrx %v15,%v17,5,12
+.*: e7 f1 40 bc d6 ef [ ]*vfmax %v15,%v17,%v20,13,12,11
+.*: e7 f1 40 d0 26 ef [ ]*vfmaxsb %v15,%v17,%v20,13
+.*: e7 f1 40 d0 36 ef [ ]*vfmaxdb %v15,%v17,%v20,13
+.*: e7 f1 40 d8 26 ef [ ]*wfmaxsb %v15,%v17,%v20,13
+.*: e7 f1 40 d8 36 ef [ ]*wfmaxdb %v15,%v17,%v20,13
+.*: e7 f1 40 d8 46 ef [ ]*wfmaxxb %v15,%v17,%v20,13
+.*: e7 f1 40 bc d6 ee [ ]*vfmin %v15,%v17,%v20,13,12,11
+.*: e7 f1 40 d0 26 ee [ ]*vfminsb %v15,%v17,%v20,13
+.*: e7 f1 40 d0 36 ee [ ]*vfmindb %v15,%v17,%v20,13
+.*: e7 f1 40 d8 26 ee [ ]*wfminsb %v15,%v17,%v20,13
+.*: e7 f1 40 d8 36 ee [ ]*wfmindb %v15,%v17,%v20,13
+.*: e7 f1 40 d8 46 ee [ ]*wfminxb %v15,%v17,%v20,13
+.*: e7 f1 40 00 26 e7 [ ]*vfmsb %v15,%v17,%v20
+.*: e7 f1 40 08 26 e7 [ ]*wfmsb %v15,%v17,%v20
+.*: e7 f1 40 08 46 e7 [ ]*wfmxb %v15,%v17,%v20
+.*: e7 f1 42 00 87 8f [ ]*vfmasb %v15,%v17,%v20,%v24
+.*: e7 f1 42 08 87 8f [ ]*wfmasb %v15,%v17,%v20,%v24
+.*: e7 f1 44 08 87 8f [ ]*wfmaxb %v15,%v17,%v20,%v24
+.*: e7 f1 42 00 87 8e [ ]*vfmssb %v15,%v17,%v20,%v24
+.*: e7 f1 42 08 87 8e [ ]*wfmssb %v15,%v17,%v20,%v24
+.*: e7 f1 44 08 87 8e [ ]*wfmsxb %v15,%v17,%v20,%v24
+.*: e7 f1 4c 0d 87 9f [ ]*vfnma %v15,%v17,%v20,%v24,13,12
+.*: e7 f1 42 00 87 9f [ ]*vfnmasb %v15,%v17,%v20,%v24
+.*: e7 f1 42 08 87 9f [ ]*wfnmasb %v15,%v17,%v20,%v24
+.*: e7 f1 43 00 87 9f [ ]*vfnmadb %v15,%v17,%v20,%v24
+.*: e7 f1 43 08 87 9f [ ]*wfnmadb %v15,%v17,%v20,%v24
+.*: e7 f1 44 08 87 9f [ ]*wfnmaxb %v15,%v17,%v20,%v24
+.*: e7 f1 4c 0d 87 9e [ ]*vfnms %v15,%v17,%v20,%v24,13,12
+.*: e7 f1 42 00 87 9e [ ]*vfnmssb %v15,%v17,%v20,%v24
+.*: e7 f1 42 08 87 9e [ ]*wfnmssb %v15,%v17,%v20,%v24
+.*: e7 f1 43 00 87 9e [ ]*vfnmsdb %v15,%v17,%v20,%v24
+.*: e7 f1 43 08 87 9e [ ]*wfnmsdb %v15,%v17,%v20,%v24
+.*: e7 f1 44 08 87 9e [ ]*wfnmsxb %v15,%v17,%v20,%v24
+.*: e7 f1 00 d0 24 cc [ ]*vfpsosb %v15,%v17,13
+.*: e7 f1 00 d8 24 cc [ ]*wfpsosb %v15,%v17,13
+.*: e7 f1 00 00 24 cc [ ]*vflcsb %v15,%v17
+.*: e7 f1 00 08 24 cc [ ]*wflcsb %v15,%v17
+.*: e7 f1 00 10 24 cc [ ]*vflnsb %v15,%v17
+.*: e7 f1 00 18 24 cc [ ]*wflnsb %v15,%v17
+.*: e7 f1 00 20 24 cc [ ]*vflpsb %v15,%v17
+.*: e7 f1 00 28 24 cc [ ]*wflpsb %v15,%v17
+.*: e7 f1 00 d8 44 cc [ ]*wfpsoxb %v15,%v17,13
+.*: e7 f1 00 08 44 cc [ ]*wflcxb %v15,%v17
+.*: e7 f1 00 18 44 cc [ ]*wflnxb %v15,%v17
+.*: e7 f1 00 28 44 cc [ ]*wflpxb %v15,%v17
+.*: e7 f1 00 00 24 ce [ ]*vfsqsb %v15,%v17
+.*: e7 f1 00 08 24 ce [ ]*wfsqsb %v15,%v17
+.*: e7 f1 00 08 44 ce [ ]*wfsqxb %v15,%v17
+.*: e7 f1 40 00 26 e2 [ ]*vfssb %v15,%v17,%v20
+.*: e7 f1 40 08 26 e2 [ ]*wfssb %v15,%v17,%v20
+.*: e7 f1 40 08 46 e2 [ ]*wfsxb %v15,%v17,%v20
+.*: e7 f1 ff d0 24 4a [ ]*vftcisb %v15,%v17,4093
+.*: e7 f1 ff d8 24 4a [ ]*wftcisb %v15,%v17,4093
+.*: e7 f1 ff d8 44 4a [ ]*wftcixb %v15,%v17,4093
+.*: e3 69 b8 f0 fd 38 [ ]*agh %r6,-10000\(%r9,%r11\)
+.*: e3 d6 98 f0 fd 47 [ ]*binh -10000\(%r6,%r9\)
+.*: e3 f6 98 f0 fd 47 [ ]*bi -10000\(%r6,%r9\)
+.*: e3 16 98 f0 fd 47 [ ]*bio -10000\(%r6,%r9\)
+.*: e3 26 98 f0 fd 47 [ ]*bih -10000\(%r6,%r9\)
+.*: e3 26 98 f0 fd 47 [ ]*bih -10000\(%r6,%r9\)
+.*: e3 36 98 f0 fd 47 [ ]*binle -10000\(%r6,%r9\)
+.*: e3 46 98 f0 fd 47 [ ]*bil -10000\(%r6,%r9\)
+.*: e3 46 98 f0 fd 47 [ ]*bil -10000\(%r6,%r9\)
+.*: e3 56 98 f0 fd 47 [ ]*binhe -10000\(%r6,%r9\)
+.*: e3 66 98 f0 fd 47 [ ]*bilh -10000\(%r6,%r9\)
+.*: e3 76 98 f0 fd 47 [ ]*bine -10000\(%r6,%r9\)
+.*: e3 76 98 f0 fd 47 [ ]*bine -10000\(%r6,%r9\)
+.*: e3 86 98 f0 fd 47 [ ]*bie -10000\(%r6,%r9\)
+.*: e3 86 98 f0 fd 47 [ ]*bie -10000\(%r6,%r9\)
+.*: e3 96 98 f0 fd 47 [ ]*binlh -10000\(%r6,%r9\)
+.*: e3 a6 98 f0 fd 47 [ ]*bihe -10000\(%r6,%r9\)
+.*: e3 b6 98 f0 fd 47 [ ]*binl -10000\(%r6,%r9\)
+.*: e3 b6 98 f0 fd 47 [ ]*binl -10000\(%r6,%r9\)
+.*: e3 c6 98 f0 fd 47 [ ]*bile -10000\(%r6,%r9\)
+.*: e3 d6 98 f0 fd 47 [ ]*binh -10000\(%r6,%r9\)
+.*: e3 d6 98 f0 fd 47 [ ]*binh -10000\(%r6,%r9\)
+.*: e3 e6 98 f0 fd 47 [ ]*bino -10000\(%r6,%r9\)
+.*: b9 ec b0 69 [ ]*mgrk %r6,%r9,%r11
+.*: e3 69 b8 f0 fd 84 [ ]*mg %r6,-10000\(%r9,%r11\)
+.*: e3 69 b8 f0 fd 3c [ ]*mgh %r6,-10000\(%r9,%r11\)
+.*: b9 fd b0 69 [ ]*msrkc %r6,%r9,%r11
+.*: b9 ed b0 69 [ ]*msgrkc %r6,%r9,%r11
+.*: e3 69 b8 f0 fd 53 [ ]*msc %r6,-10000\(%r9,%r11\)
+.*: e3 69 b8 f0 fd 83 [ ]*msgc %r6,-10000\(%r9,%r11\)
+.*: e3 69 b8 f0 fd 39 [ ]*sgh %r6,-10000\(%r9,%r11\)
+.*: e6 06 9f a0 f0 37 [ ]*vlrlr %v15,%r6,4000\(%r9\)
+.*: e6 fd 6f a0 f0 35 [ ]*vlrl %v15,4000\(%r6\),253
+.*: e6 06 9f a0 f0 3f [ ]*vstrlr %v15,%r6,4000\(%r9\)
+.*: e6 fd 6f a0 f0 3d [ ]*vstrl %v15,4000\(%r6\),253
+.*: e6 f1 40 cf d6 71 [ ]*vap %v15,%v17,%v20,253,12
+.*: e6 0f 10 d0 02 77 [ ]*vcp %v15,%v17,13
+.*: e6 6f 00 d0 00 50 [ ]*vcvb %r6,%v15,13
+.*: e6 6f 00 d0 00 52 [ ]*vcvbg %r6,%v15,13
+.*: e6 f6 00 cf d0 58 [ ]*vcvd %v15,%r6,253,12
+.*: e6 f6 00 cf d0 5a [ ]*vcvdg %v15,%r6,253,12
+.*: e6 f1 40 cf d6 7a [ ]*vdp %v15,%v17,%v20,253,12
+.*: e6 f0 ff fd c0 49 [ ]*vlip %v15,65533,12
+.*: e6 f1 40 cf d6 78 [ ]*vmp %v15,%v17,%v20,253,12
+.*: e6 f1 40 cf d6 79 [ ]*vmsp %v15,%v17,%v20,253,12
+.*: e6 fd 6f a0 f0 34 [ ]*vpkz %v15,4000\(%r6\),253
+.*: e6 f1 fc bf d4 5b [ ]*vpsop %v15,%v17,253,252,11
+.*: e6 f1 40 cf d6 7b [ ]*vrp %v15,%v17,%v20,253,12
+.*: e6 f1 40 cf d6 7e [ ]*vsdp %v15,%v17,%v20,253,12
+.*: e6 f1 fc bf d4 59 [ ]*vsrp %v15,%v17,253,252,11
+.*: e6 f1 40 cf d6 73 [ ]*vsp %v15,%v17,%v20,253,12
+.*: e6 0f 00 00 00 5f [ ]*vtp %v15
+.*: e6 fd 6f a0 f0 3c [ ]*vupkz %v15,4000\(%r6\),253
+.*: e3 69 b8 f0 fd 4c [ ]*lgg %r6,-10000\(%r9,%r11\)
+.*: e3 69 b8 f0 fd 48 [ ]*llgfsg %r6,-10000\(%r9,%r11\)
+.*: e3 69 b8 f0 fd 4d [ ]*lgsc %r6,-10000\(%r9,%r11\)
+.*: e3 69 b8 f0 fd 49 [ ]*stgsc %r6,-10000\(%r9,%r11\)
+.*: b9 29 90 6b [ ]*kma %r6,%r9,%r11
diff --git a/gas/testsuite/gas/s390/zarch-arch12.s b/gas/testsuite/gas/s390/zarch-arch12.s
new file mode 100644
index 0000000..6ebd2fd
--- /dev/null
+++ b/gas/testsuite/gas/s390/zarch-arch12.s
@@ -0,0 +1,194 @@
+.text
+foo:
+ vbperm %v15,%v17,%v20
+ vllezlf %v15,4000(%r6,%r9)
+ vmsl %v15,%v17,%v20,%v24,13,12
+ vmslg %v15,%v17,%v20,%v24,13
+ vnx %v15,%v17,%v20
+ vnn %v15,%v17,%v20
+ voc %v15,%v17,%v20
+ vpopctb %v15,%v17
+ vpopcth %v15,%v17
+ vpopctf %v15,%v17
+ vpopctg %v15,%v17
+ vfasb %v15,%v17,%v20
+ wfasb %v15,%v17,%v20
+ wfaxb %v15,%v17,%v20
+ wfcsb %v15,%v17
+ wfcxb %v15,%v17
+ wfksb %v15,%v17
+ wfkxb %v15,%v17
+ vfcesb %v15,%v17,%v20
+ vfcesbs %v15,%v17,%v20
+ wfcesb %v15,%v17,%v20
+ wfcesbs %v15,%v17,%v20
+ wfcexb %v15,%v17,%v20
+ wfcexbs %v15,%v17,%v20
+ vfkesb %v15,%v17,%v20
+ vfkesbs %v15,%v17,%v20
+ wfkesb %v15,%v17,%v20
+ wfkesbs %v15,%v17,%v20
+ vfkedb %v15,%v17,%v20
+ vfkedbs %v15,%v17,%v20
+ wfkedb %v15,%v17,%v20
+ wfkedbs %v15,%v17,%v20
+ wfkexb %v15,%v17,%v20
+ wfkexbs %v15,%v17,%v20
+ vfchsb %v15,%v17,%v20
+ vfchsbs %v15,%v17,%v20
+ wfchsb %v15,%v17,%v20
+ wfchsbs %v15,%v17,%v20
+ wfchxb %v15,%v17,%v20
+ wfchxbs %v15,%v17,%v20
+ vfkhsb %v15,%v17,%v20
+ vfkhsbs %v15,%v17,%v20
+ wfkhsb %v15,%v17,%v20
+ wfkhsbs %v15,%v17,%v20
+ vfkhdb %v15,%v17,%v20
+ vfkhdbs %v15,%v17,%v20
+ wfkhdb %v15,%v17,%v20
+ wfkhdbs %v15,%v17,%v20
+ wfkhxb %v15,%v17,%v20
+ wfkhxbs %v15,%v17,%v20
+ vfchesb %v15,%v17,%v20
+ vfchesbs %v15,%v17,%v20
+ wfchesb %v15,%v17,%v20
+ wfchesbs %v15,%v17,%v20
+ wfchexb %v15,%v17,%v20
+ wfchexbs %v15,%v17,%v20
+ vfkhesb %v15,%v17,%v20
+ vfkhesbs %v15,%v17,%v20
+ wfkhesb %v15,%v17,%v20
+ wfkhesbs %v15,%v17,%v20
+ vfkhedb %v15,%v17,%v20
+ vfkhedbs %v15,%v17,%v20
+ wfkhedb %v15,%v17,%v20
+ wfkhedbs %v15,%v17,%v20
+ wfkhexb %v15,%v17,%v20
+ wfkhexbs %v15,%v17,%v20
+ vfdsb %v15,%v17,%v20
+ wfdsb %v15,%v17,%v20
+ wfdxb %v15,%v17,%v20
+ vfisb %v15,%v17,13,12
+ wfisb %v15,%v17,13,12
+ wfixb %v15,%v17,13,12
+ vfll %v15,%v17,13,12
+ vflls %v15,%v17
+ wflls %v15,%v17
+ wflld %v15,%v17
+ vflr %v15,%v17,13,12,11
+ vflrd %v15,%v17,13,12
+ wflrd %v15,%v17,13,12
+ wflrx %v15,%v17,13,12
+ vfmax %v15,%v17,%v20,13,12,11
+ vfmaxsb %v15,%v17,%v20,13
+ vfmaxdb %v15,%v17,%v20,13
+ wfmaxsb %v15,%v17,%v20,13
+ wfmaxdb %v15,%v17,%v20,13
+ wfmaxxb %v15,%v17,%v20,13
+ vfmin %v15,%v17,%v20,13,12,11
+ vfminsb %v15,%v17,%v20,13
+ vfmindb %v15,%v17,%v20,13
+ wfminsb %v15,%v17,%v20,13
+ wfmindb %v15,%v17,%v20,13
+ wfminxb %v15,%v17,%v20,13
+ vfmsb %v15,%v17,%v20
+ wfmsb %v15,%v17,%v20
+ wfmxb %v15,%v17,%v20
+ vfmasb %v15,%v17,%v20,%v24
+ wfmasb %v15,%v17,%v20,%v24
+ wfmaxb %v15,%v17,%v20,%v24
+ vfmssb %v15,%v17,%v20,%v24
+ wfmssb %v15,%v17,%v20,%v24
+ wfmsxb %v15,%v17,%v20,%v24
+ vfnma %v15,%v17,%v20,%v24,13,12
+ vfnmasb %v15,%v17,%v20,%v24
+ wfnmasb %v15,%v17,%v20,%v24
+ vfnmadb %v15,%v17,%v20,%v24
+ wfnmadb %v15,%v17,%v20,%v24
+ wfnmaxb %v15,%v17,%v20,%v24
+ vfnms %v15,%v17,%v20,%v24,13,12
+ vfnmssb %v15,%v17,%v20,%v24
+ wfnmssb %v15,%v17,%v20,%v24
+ vfnmsdb %v15,%v17,%v20,%v24
+ wfnmsdb %v15,%v17,%v20,%v24
+ wfnmsxb %v15,%v17,%v20,%v24
+ vfpsosb %v15,%v17,13
+ wfpsosb %v15,%v17,13
+ vflcsb %v15,%v17
+ wflcsb %v15,%v17
+ vflnsb %v15,%v17
+ wflnsb %v15,%v17
+ vflpsb %v15,%v17
+ wflpsb %v15,%v17
+ wfpsoxb %v15,%v17,13
+ wflcxb %v15,%v17
+ wflnxb %v15,%v17
+ wflpxb %v15,%v17
+ vfsqsb %v15,%v17
+ wfsqsb %v15,%v17
+ wfsqxb %v15,%v17
+ vfssb %v15,%v17,%v20
+ wfssb %v15,%v17,%v20
+ wfsxb %v15,%v17,%v20
+ vftcisb %v15,%v17,4093
+ wftcisb %v15,%v17,4093
+ wftcixb %v15,%v17,4093
+ agh %r6,-10000(%r9,%r11)
+ bic 13,-10000(%r6,%r9)
+ bi -10000(%r6,%r9)
+ bio -10000(%r6,%r9)
+ bih -10000(%r6,%r9)
+ bip -10000(%r6,%r9)
+ binle -10000(%r6,%r9)
+ bil -10000(%r6,%r9)
+ bim -10000(%r6,%r9)
+ binhe -10000(%r6,%r9)
+ bilh -10000(%r6,%r9)
+ bine -10000(%r6,%r9)
+ binz -10000(%r6,%r9)
+ bie -10000(%r6,%r9)
+ biz -10000(%r6,%r9)
+ binlh -10000(%r6,%r9)
+ bihe -10000(%r6,%r9)
+ binl -10000(%r6,%r9)
+ binm -10000(%r6,%r9)
+ bile -10000(%r6,%r9)
+ binh -10000(%r6,%r9)
+ binp -10000(%r6,%r9)
+ bino -10000(%r6,%r9)
+ mgrk %r6,%r9,%r11
+ mg %r6,-10000(%r9,%r11)
+ mgh %r6,-10000(%r9,%r11)
+ msrkc %r6,%r9,%r11
+ msgrkc %r6,%r9,%r11
+ msc %r6,-10000(%r9,%r11)
+ msgc %r6,-10000(%r9,%r11)
+ sgh %r6,-10000(%r9,%r11)
+ vlrlr %v15,%r6,4000(%r9)
+ vlrl %v15,4000(%r6),253
+ vstrlr %v15,%r6,4000(%r9)
+ vstrl %v15,4000(%r6),253
+ vap %v15,%v17,%v20,253,12
+ vcp %v15,%v17,13
+ vcvb %r6,%v15,13
+ vcvbg %r6,%v15,13
+ vcvd %v15,%r6,253,12
+ vcvdg %v15,%r6,253,12
+ vdp %v15,%v17,%v20,253,12
+ vlip %v15,65533,12
+ vmp %v15,%v17,%v20,253,12
+ vmsp %v15,%v17,%v20,253,12
+ vpkz %v15,4000(%r6),253
+ vpsop %v15,%v17,253,252,11
+ vrp %v15,%v17,%v20,253,12
+ vsdp %v15,%v17,%v20,253,12
+ vsrp %v15,%v17,253,252,11
+ vsp %v15,%v17,%v20,253,12
+ vtp %v15
+ vupkz %v15,4000(%r6),253
+ lgg %r6,-10000(%r9,%r11)
+ llgfsg %r6,-10000(%r9,%r11)
+ lgsc %r6,-10000(%r9,%r11)
+ stgsc %r6,-10000(%r9,%r11)
+ kma %r6,%r9,%r11
diff -rup binutils.orig/gas/testsuite/gas/s390/zarch-z13.d binutils-2.27/gas/testsuite/gas/s390/zarch-z13.d
--- binutils.orig/gas/testsuite/gas/s390/zarch-z13.d 2017-03-24 14:50:54.052789212 +0000
+++ binutils-2.27/gas/testsuite/gas/s390/zarch-z13.d 2017-03-24 14:54:05.065330554 +0000
@@ -513,12 +513,12 @@ Disassembly of section .text:
.*: e7 f1 00 bc d4 c7 [ ]*vfi %v15,%v17,13,12,11
.*: e7 f1 00 cd 34 c7 [ ]*wfidb %v15,%v17,5,12
.*: e7 f1 00 cd 34 c7 [ ]*wfidb %v15,%v17,5,12
-.*: e7 f1 00 0c d4 c4 [ ]*vlde %v15,%v17,13,12
-.*: e7 f1 00 00 24 c4 [ ]*vldeb %v15,%v17
-.*: e7 f1 00 08 24 c4 [ ]*wldeb %v15,%v17
-.*: e7 f1 00 bc d4 c5 [ ]*vled %v15,%v17,13,12,11
-.*: e7 f1 00 cd 34 c5 [ ]*wledb %v15,%v17,5,12
-.*: e7 f1 00 cd 34 c5 [ ]*wledb %v15,%v17,5,12
+.*: e7 f1 00 0c d4 c4 [ ]*vfll %v15,%v17,13,12
+.*: e7 f1 00 00 24 c4 [ ]*vflls %v15,%v17
+.*: e7 f1 00 08 24 c4 [ ]*wflls %v15,%v17
+.*: e7 f1 00 bc d4 c5 [ ]*vflr %v15,%v17,13,12,11
+.*: e7 f1 00 cd 34 c5 [ ]*wflrd %v15,%v17,5,12
+.*: e7 f1 00 cd 34 c5 [ ]*wflrd %v15,%v17,5,12
.*: e7 f1 40 0c d6 e7 [ ]*vfm %v15,%v17,%v20,13,12
.*: e7 f1 40 00 36 e7 [ ]*vfmdb %v15,%v17,%v20
.*: e7 f1 40 08 36 e7 [ ]*wfmdb %v15,%v17,%v20
diff -rup binutils.orig/include/opcode/s390.h binutils-2.27/include/opcode/s390.h
--- binutils.orig/include/opcode/s390.h 2017-03-24 14:51:01.896688195 +0000
+++ binutils-2.27/include/opcode/s390.h 2017-03-24 14:54:05.065330554 +0000
@@ -42,14 +42,17 @@ enum s390_opcode_cpu_val
S390_OPCODE_Z196,
S390_OPCODE_ZEC12,
S390_OPCODE_Z13,
+ S390_OPCODE_ARCH12,
S390_OPCODE_MAXCPU
};
/* Instruction specific flags. */
#define S390_INSTR_FLAG_OPTPARM 0x1
+
#define S390_INSTR_FLAG_HTM 0x2
#define S390_INSTR_FLAG_VX 0x4
-#define S390_INSTR_FLAG_FACILITY_MASK 0x6
+#define S390_INSTR_FLAG_VX2 0x8
+#define S390_INSTR_FLAG_FACILITY_MASK 0xe
/* The opcode table is an array of struct s390_opcode. */
diff -rup binutils.orig/opcodes/s390-mkopc.c binutils-2.27/opcodes/s390-mkopc.c
--- binutils.orig/opcodes/s390-mkopc.c 2017-03-24 14:50:54.849778948 +0000
+++ binutils-2.27/opcodes/s390-mkopc.c 2017-03-24 14:54:05.065330554 +0000
@@ -366,6 +366,8 @@ main (void)
min_cpu = S390_OPCODE_ZEC12;
else if (strcmp (cpu_string, "z13") == 0)
min_cpu = S390_OPCODE_Z13;
+ else if (strcmp (cpu_string, "arch12") == 0)
+ min_cpu = S390_OPCODE_ARCH12;
else {
fprintf (stderr, "Couldn't parse cpu string %s\n", cpu_string);
exit (1);
@@ -409,6 +411,10 @@ main (void)
&& (str[2] == 0 || str[2] == ',')) {
flag_bits |= S390_INSTR_FLAG_VX;
str += 2;
+ } else if (strncmp (str, "vx2", 3) == 0
+ && (str[3] == 0 || str[3] == ',')) {
+ flag_bits |= S390_INSTR_FLAG_VX2;
+ str += 3;
} else {
fprintf (stderr, "Couldn't parse flags string %s\n",
flags_string);
diff -rup binutils.orig/opcodes/s390-opc.c binutils-2.27/opcodes/s390-opc.c
--- binutils.orig/opcodes/s390-opc.c 2017-03-24 14:50:54.850778935 +0000
+++ binutils-2.27/opcodes/s390-opc.c 2017-03-24 14:54:05.065330554 +0000
@@ -220,28 +220,30 @@ const struct s390_operand s390_operands[
{ 8, 16, 0 },
#define U8_24 69 /* 8 bit unsigned value starting at 24 */
{ 8, 24, 0 },
-#define U8_32 70 /* 8 bit unsigned value starting at 32 */
+#define U8_28 70 /* 8 bit unsigned value starting at 28 */
+ { 8, 28, 0 },
+#define U8_32 71 /* 8 bit unsigned value starting at 32 */
{ 8, 32, 0 },
-#define U12_16 71 /* 12 bit unsigned value starting at 16 */
+#define U12_16 72 /* 12 bit unsigned value starting at 16 */
{ 12, 16, 0 },
-#define U16_16 72 /* 16 bit unsigned value starting at 16 */
+#define U16_16 73 /* 16 bit unsigned value starting at 16 */
{ 16, 16, 0 },
-#define U16_32 73 /* 16 bit unsigned value starting at 32 */
+#define U16_32 74 /* 16 bit unsigned value starting at 32 */
{ 16, 32, 0 },
-#define U32_16 74 /* 32 bit unsigned value starting at 16 */
+#define U32_16 75 /* 32 bit unsigned value starting at 16 */
{ 32, 16, 0 },
/* PC-relative address operands. */
-#define J12_12 75 /* 12 bit PC relative offset at 12 */
+#define J12_12 76 /* 12 bit PC relative offset at 12 */
{ 12, 12, S390_OPERAND_PCREL },
-#define J16_16 76 /* 16 bit PC relative offset at 16 */
+#define J16_16 77 /* 16 bit PC relative offset at 16 */
{ 16, 16, S390_OPERAND_PCREL },
-#define J16_32 77 /* 16 bit PC relative offset at 32 */
+#define J16_32 78 /* 16 bit PC relative offset at 32 */
{ 16, 32, S390_OPERAND_PCREL },
-#define J24_24 78 /* 24 bit PC relative offset at 24 */
+#define J24_24 79 /* 24 bit PC relative offset at 24 */
{ 24, 24, S390_OPERAND_PCREL },
-#define J32_16 79 /* 32 bit PC relative offset at 16 */
+#define J32_16 80 /* 32 bit PC relative offset at 16 */
{ 32, 16, S390_OPERAND_PCREL },
};
@@ -425,6 +427,7 @@ const struct s390_operand s390_operands[
#define INSTR_RXY_RERRD 6, { RE_8,D20_20,X_12,B_16,0,0 } /* e.g. dsg */
#define INSTR_RXY_FRRD 6, { F_8,D20_20,X_12,B_16,0,0 } /* e.g. ley */
#define INSTR_RXY_URRD 6, { U4_8,D20_20,X_12,B_16,0,0 } /* e.g. pfd */
+#define INSTR_RXY_0RRD 6, { D20_20,X_12,B_16,0,0 } /* e.g. bic */
#define INSTR_RX_0RRD 4, { D_20,X_12,B_16,0,0,0 } /* e.g. be */
#define INSTR_RX_FRRD 4, { F_8,D_20,X_12,B_16,0,0 } /* e.g. ae */
#define INSTR_RX_FERRD 4, { FE_8,D_20,X_12,B_16,0,0 } /* e.g. mxd */
@@ -454,23 +457,29 @@ const struct s390_operand s390_operands[
#define INSTR_VRI_V 6, { V_8,0,0,0,0,0 } /* e.g. vzero */
#define INSTR_VRI_V0UUU 6, { V_8,U8_16,U8_24,U4_32,0,0 } /* e.g. vgm */
#define INSTR_VRI_V0UU 6, { V_8,U8_16,U8_24,0,0,0 } /* e.g. vgmb */
+#define INSTR_VRI_V0UU2 6, { V_8,U16_16,U4_32,0,0,0 } /* e.g. vlip */
#define INSTR_VRI_VVUU 6, { V_8,V_12,U16_16,U4_32,0,0 } /* e.g. vrep */
#define INSTR_VRI_VVU 6, { V_8,V_12,U16_16,0,0,0 } /* e.g. vrepb */
#define INSTR_VRI_VVU2 6, { V_8,V_12,U12_16,0,0,0 } /* e.g. vftcidb */
#define INSTR_VRI_V0IU 6, { V_8,I16_16,U4_32,0,0,0 } /* e.g. vrepi */
#define INSTR_VRI_V0I 6, { V_8,I16_16,0,0,0,0 } /* e.g. vrepib */
#define INSTR_VRI_VVV0UU 6, { V_8,V_12,V_16,U8_24,U4_32,0 } /* e.g. verim */
+#define INSTR_VRI_VVV0UU2 6, { V_8,V_12,V_16,U8_28,U4_24,0 } /* e.g. vap */
#define INSTR_VRI_VVV0U 6, { V_8,V_12,V_16,U8_24,0,0 } /* e.g. verimb*/
#define INSTR_VRI_VVUUU 6, { V_8,V_12,U12_16,U4_32,U4_28,0 } /* e.g. vftci */
+#define INSTR_VRI_VVUUU2 6, { V_8,V_12,U8_28,U8_16,U4_24,0 } /* e.g. vpsop */
+#define INSTR_VRI_VR0UU 6, { V_8,R_12,U8_28,U4_24,0,0 } /* e.g. vcvd */
#define INSTR_VRX_VRRD 6, { V_8,D_20,X_12,B_16,0,0 } /* e.g. vl */
#define INSTR_VRX_VV 6, { V_8,V_12,0,0,0,0 } /* e.g. vlr */
-#define INSTR_VRX_VRRDU 6, { V_8,D_20,X_12,B_16,U4_32,0 } /* e.g. vlrp */
+#define INSTR_VRX_VRRDU 6, { V_8,D_20,X_12,B_16,U4_32,0 } /* e.g. vlrep */
#define INSTR_VRS_RVRDU 6, { R_8,V_12,D_20,B_16,U4_32,0 } /* e.g. vlgv */
#define INSTR_VRS_RVRD 6, { R_8,V_12,D_20,B_16,0,0 } /* e.g. vlgvb */
#define INSTR_VRS_VVRDU 6, { V_8,V_12,D_20,B_16,U4_32,0 } /* e.g. verll */
#define INSTR_VRS_VVRD 6, { V_8,V_12,D_20,B_16,0,0 } /* e.g. vlm */
#define INSTR_VRS_VRRDU 6, { V_8,R_12,D_20,B_16,U4_32,0 } /* e.g. vlvg */
#define INSTR_VRS_VRRD 6, { V_8,R_12,D_20,B_16,0,0 } /* e.g. vlvgb */
+#define INSTR_VRS_RRDV 6, { V_32,R_12,D_20,B_16,0,0 } /* e.g. vlrlr */
+#define INSTR_VRR_0V 6, { V_12,0,0,0,0,0 } /* e.g. vtp */
#define INSTR_VRR_VRR 6, { V_8,R_12,R_16,0,0,0 } /* e.g. vlvgp */
#define INSTR_VRR_VVV0U 6, { V_8,V_12,V_16,U4_32,0,0 } /* e.g. vmrh */
#define INSTR_VRR_VVV0U0 6, { V_8,V_12,V_16,U4_24,0,0 } /* e.g. vfaeb */
@@ -499,6 +508,9 @@ const struct s390_operand s390_operands[
#define INSTR_VRR_VV0UUU 6, { V_8,V_12,U4_32,U4_28,U4_24,0 } /* e.g. vcdg */
#define INSTR_VRR_VVVU0UV 6, { V_8,V_12,V_16,V_32,U4_28,U4_20 } /* e.g. vfma */
#define INSTR_VRR_VV0U0U 6, { V_8,V_12,U4_32,U4_24,0,0 } /* e.g. vistr */
+#define INSTR_VRR_0VV0U 6, { V_12,V_16,U4_24,0,0,0 } /* e.g. vcp */
+#define INSTR_VRR_RV0U 6, { R_8,V_12,U4_24,0,0,0 } /* e.g. vcvb */
+#define INSTR_VSI_URDV 6, { V_32,D_20,B_16,U8_8,0,0 } /* e.g. vlrl */
#define MASK_E { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_IE_UU { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
@@ -631,6 +643,7 @@ const struct s390_operand s390_operands[
#define MASK_RXY_RERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXY_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXY_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RXY_0RRD { 0xff, 0xf0, 0x00, 0x00, 0x00, 0xff }
#define MASK_RX_0RRD { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_FERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
@@ -660,14 +673,18 @@ const struct s390_operand s390_operands[
#define MASK_VRI_V { 0xff, 0x0f, 0xff, 0xff, 0xf0, 0xff }
#define MASK_VRI_V0UUU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff }
#define MASK_VRI_V0UU { 0xff, 0x0f, 0x00, 0x00, 0xf0, 0xff }
+#define MASK_VRI_V0UU2 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff }
#define MASK_VRI_VVUU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_VRI_VVU { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff }
#define MASK_VRI_VVU2 { 0xff, 0x00, 0x00, 0x0f, 0xf0, 0xff }
#define MASK_VRI_V0IU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff }
#define MASK_VRI_V0I { 0xff, 0x0f, 0x00, 0x00, 0xf0, 0xff }
#define MASK_VRI_VVV0UU { 0xff, 0x00, 0x0f, 0x00, 0x00, 0xff }
+#define MASK_VRI_VVV0UU2 { 0xff, 0x00, 0x0f, 0x00, 0x00, 0xff }
#define MASK_VRI_VVV0U { 0xff, 0x00, 0x0f, 0x00, 0xf0, 0xff }
#define MASK_VRI_VVUUU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_VRI_VVUUU2 { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_VRI_VR0UU { 0xff, 0x00, 0xff, 0x00, 0x00, 0xff }
#define MASK_VRX_VRRD { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff }
#define MASK_VRX_VV { 0xff, 0x00, 0xff, 0xff, 0xf0, 0xff }
#define MASK_VRX_VRRDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
@@ -677,6 +694,8 @@ const struct s390_operand s390_operands[
#define MASK_VRS_VVRD { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff }
#define MASK_VRS_VRRDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_VRS_VRRD { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff }
+#define MASK_VRS_RRDV { 0xff, 0xf0, 0x00, 0x00, 0x00, 0xff }
+#define MASK_VRR_0V { 0xff, 0xf0, 0xff, 0xff, 0xf0, 0xff }
#define MASK_VRR_VRR { 0xff, 0x00, 0x0f, 0xff, 0xf0, 0xff }
#define MASK_VRR_VVV0U { 0xff, 0x00, 0x0f, 0xff, 0x00, 0xff }
#define MASK_VRR_VVV0U0 { 0xff, 0x00, 0x0f, 0x0f, 0xf0, 0xff }
@@ -705,36 +724,46 @@ const struct s390_operand s390_operands[
#define MASK_VRR_VV0UUU { 0xff, 0x00, 0xff, 0x00, 0x00, 0xff }
#define MASK_VRR_VVVU0UV { 0xff, 0x00, 0x00, 0xf0, 0x00, 0xff }
#define MASK_VRR_VV0U0U { 0xff, 0x00, 0xff, 0x0f, 0x00, 0xff }
+#define MASK_VRR_0VV0U { 0xff, 0xf0, 0x0f, 0x0f, 0xf0, 0xff }
+#define MASK_VRR_RV0U { 0xff, 0x00, 0xff, 0x0f, 0xf0, 0xff }
+#define MASK_VSI_URDV { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+
/* The opcode formats table (blueprints for .insn pseudo mnemonic). */
const struct s390_opcode s390_opformats[] =
{
- { "e", OP8(0x00LL), MASK_E, INSTR_E, 3, 0 ,0 },
- { "ri", OP8(0x00LL), MASK_RI_RI, INSTR_RI_RI, 3, 0 ,0 },
- { "rie", OP8(0x00LL), MASK_RIE_RRP, INSTR_RIE_RRP, 3, 0 ,0 },
- { "ril", OP8(0x00LL), MASK_RIL_RP, INSTR_RIL_RP, 3, 0 ,0 },
- { "rilu", OP8(0x00LL), MASK_RIL_RU, INSTR_RIL_RU, 3, 0 ,0 },
- { "ris", OP8(0x00LL), MASK_RIS_RURDI, INSTR_RIS_RURDI,3, 6 ,0 },
- { "rr", OP8(0x00LL), MASK_RR_RR, INSTR_RR_RR, 3, 0 ,0 },
- { "rre", OP8(0x00LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0 ,0 },
- { "rrf", OP8(0x00LL), MASK_RRF_RURR, INSTR_RRF_RURR, 3, 0 ,0 },
- { "rrs", OP8(0x00LL), MASK_RRS_RRRDU, INSTR_RRS_RRRDU,3, 6 ,0 },
- { "rs", OP8(0x00LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0 ,0 },
- { "rse", OP8(0x00LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 0 ,0 },
- { "rsi", OP8(0x00LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0 ,0 },
- { "rsy", OP8(0x00LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3 ,0 },
- { "rx", OP8(0x00LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0 ,0 },
- { "rxe", OP8(0x00LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 0 ,0 },
- { "rxf", OP8(0x00LL), MASK_RXF_RRRDR, INSTR_RXF_RRRDR,3, 0 ,0 },
- { "rxy", OP8(0x00LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3 ,0 },
- { "s", OP8(0x00LL), MASK_S_RD, INSTR_S_RD, 3, 0 ,0 },
- { "si", OP8(0x00LL), MASK_SI_URD, INSTR_SI_URD, 3, 0 ,0 },
- { "siy", OP8(0x00LL), MASK_SIY_URD, INSTR_SIY_URD, 3, 3 ,0 },
- { "sil", OP8(0x00LL), MASK_SIL_RDI, INSTR_SIL_RDI, 3, 6 ,0 },
- { "ss", OP8(0x00LL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD,3, 0 ,0 },
- { "sse", OP8(0x00LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0 ,0 },
- { "ssf", OP8(0x00LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD,3, 0 ,0 },
+ { "e", OP8(0x00LL), MASK_E, INSTR_E, 3, 0 ,0 },
+ { "ri", OP8(0x00LL), MASK_RI_RI, INSTR_RI_RI, 3, 0 ,0 },
+ { "rie", OP8(0x00LL), MASK_RIE_RRP, INSTR_RIE_RRP, 3, 0 ,0 },
+ { "ril", OP8(0x00LL), MASK_RIL_RP, INSTR_RIL_RP, 3, 0 ,0 },
+ { "rilu", OP8(0x00LL), MASK_RIL_RU, INSTR_RIL_RU, 3, 0 ,0 },
+ { "ris", OP8(0x00LL), MASK_RIS_RURDI, INSTR_RIS_RURDI, 3, 6 ,0 },
+ { "rr", OP8(0x00LL), MASK_RR_RR, INSTR_RR_RR, 3, 0 ,0 },
+ { "rre", OP8(0x00LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0 ,0 },
+ { "rrf", OP8(0x00LL), MASK_RRF_RURR, INSTR_RRF_RURR, 3, 0 ,0 },
+ { "rrs", OP8(0x00LL), MASK_RRS_RRRDU, INSTR_RRS_RRRDU, 3, 6 ,0 },
+ { "rs", OP8(0x00LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0 ,0 },
+ { "rse", OP8(0x00LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 0 ,0 },
+ { "rsi", OP8(0x00LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0 ,0 },
+ { "rsy", OP8(0x00LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3 ,0 },
+ { "rx", OP8(0x00LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0 ,0 },
+ { "rxe", OP8(0x00LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 0 ,0 },
+ { "rxf", OP8(0x00LL), MASK_RXF_RRRDR, INSTR_RXF_RRRDR, 3, 0 ,0 },
+ { "rxy", OP8(0x00LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3 ,0 },
+ { "s", OP8(0x00LL), MASK_S_RD, INSTR_S_RD, 3, 0 ,0 },
+ { "si", OP8(0x00LL), MASK_SI_URD, INSTR_SI_URD, 3, 0 ,0 },
+ { "siy", OP8(0x00LL), MASK_SIY_URD, INSTR_SIY_URD, 3, 3 ,0 },
+ { "sil", OP8(0x00LL), MASK_SIL_RDI, INSTR_SIL_RDI, 3, 6 ,0 },
+ { "ss", OP8(0x00LL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD, 3, 0 ,0 },
+ { "sse", OP8(0x00LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0 ,0 },
+ { "ssf", OP8(0x00LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD, 3, 0 ,0 },
+ { "vrv", OP8(0x00LL), MASK_VRV_VVXRDU, INSTR_VRV_VVXRDU, 3, 9 ,0 },
+ { "vri", OP8(0x00LL), MASK_VRI_VVUUU, INSTR_VRI_VVUUU, 3, 9 ,0 },
+ { "vrx", OP8(0x00LL), MASK_VRX_VRRDU, INSTR_VRX_VRRDU, 3, 9 ,0 },
+ { "vrs", OP8(0x00LL), MASK_VRS_RVRDU, INSTR_VRS_RVRDU, 3, 9 ,0 },
+ { "vrr", OP8(0x00LL), MASK_VRR_VVV0UUU, INSTR_VRR_VVV0UUU, 3, 9 ,0 },
+ { "vsi", OP8(0x00LL), MASK_VSI_URDV, INSTR_VSI_URDV, 3, 10 ,0 },
};
const int s390_num_opformats =
diff -rup binutils.orig/opcodes/s390-opc.txt binutils-2.27/opcodes/s390-opc.txt
--- binutils.orig/opcodes/s390-opc.txt 2017-03-24 14:50:54.851778922 +0000
+++ binutils-2.27/opcodes/s390-opc.txt 2017-03-24 14:54:05.066330541 +0000
@@ -630,7 +630,7 @@ eb0000000051 tmy SIY_URD "test under mas
# 'old' instructions extended to long displacement
# these instructions are entered into the opcode table twice.
e30000000003 lrag RXY_RRRD "load real address with long offset 64" z990 zarch
-e30000000004 lg RXY_RRRD " load 64" z990 zarch
+e30000000004 lg RXY_RRRD "load 64" z990 zarch
e30000000008 ag RXY_RRRD "add with long offset 64" z990 zarch
e30000000009 sg RXY_RRRD "subtract with long offset 64" z990 zarch
e3000000000a alg RXY_RRRD "add logical with long offset 64" z990 zarch
@@ -1584,27 +1584,27 @@ e7000230008a vstrczfs VRR_VVVU0VB3 "vect
# Chapter 24
e700000000e3 vfa VRR_VVV0UU "vector fp add" z13 zarch vx
-e700000030e3 vfadb VRR_VVV "vector fp add" z13 zarch vx
-e700000830e3 wfadb VRR_VVV "vector fp add" z13 zarch vx
+e700000030e3 vfadb VRR_VVV "vector fp add long" z13 zarch vx
+e700000830e3 wfadb VRR_VVV "vector fp add long" z13 zarch vx
e700000000cb wfc VRR_VV0UU2 "vector fp compare scalar" z13 zarch vx
-e700000030cb wfcdb VRR_VV "vector fp compare scalar" z13 zarch vx
+e700000030cb wfcdb VRR_VV "vector fp compare scalar long" z13 zarch vx
e700000000ca wfk VRR_VV0UU2 "vector fp compare and signal scalar" z13 zarch vx
-e700000030ca wfkdb VRR_VV "vector fp compare and signal scalar" z13 zarch vx
+e700000030ca wfkdb VRR_VV "vector fp compare and signal scalar long" z13 zarch vx
e700000000e8 vfce VRR_VVV0UUU "vector fp compare equal" z13 zarch vx
-e700000030e8 vfcedb VRR_VVV "vector fp compare equal" z13 zarch vx
-e700001030e8 vfcedbs VRR_VVV "vector fp compare equal" z13 zarch vx
-e700000830e8 wfcedb VRR_VVV "vector fp compare equal" z13 zarch vx
-e700001830e8 wfcedbs VRR_VVV "vector fp compare equal" z13 zarch vx
+e700000030e8 vfcedb VRR_VVV "vector fp compare equal long" z13 zarch vx
+e700001030e8 vfcedbs VRR_VVV "vector fp compare equal long" z13 zarch vx
+e700000830e8 wfcedb VRR_VVV "vector fp compare equal long" z13 zarch vx
+e700001830e8 wfcedbs VRR_VVV "vector fp compare equal long" z13 zarch vx
e700000000eb vfch VRR_VVV0UUU "vector fp compare high" z13 zarch vx
-e700000030eb vfchdb VRR_VVV "vector fp compare high" z13 zarch vx
-e700001030eb vfchdbs VRR_VVV "vector fp compare high" z13 zarch vx
-e700000830eb wfchdb VRR_VVV "vector fp compare high" z13 zarch vx
-e700001830eb wfchdbs VRR_VVV "vector fp compare high" z13 zarch vx
+e700000030eb vfchdb VRR_VVV "vector fp compare high long" z13 zarch vx
+e700001030eb vfchdbs VRR_VVV "vector fp compare high long" z13 zarch vx
+e700000830eb wfchdb VRR_VVV "vector fp compare high long" z13 zarch vx
+e700001830eb wfchdbs VRR_VVV "vector fp compare high long" z13 zarch vx
e700000000ea vfche VRR_VVV0UUU "vector fp compare high or equal" z13 zarch vx
-e700000030ea vfchedb VRR_VVV "vector fp compare high or equal" z13 zarch vx
-e700001030ea vfchedbs VRR_VVV "vector fp compare high or equal" z13 zarch vx
-e700000830ea wfchedb VRR_VVV "vector fp compare high or equal" z13 zarch vx
-e700001830ea wfchedbs VRR_VVV "vector fp compare high or equal" z13 zarch vx
+e700000030ea vfchedb VRR_VVV "vector fp compare high or equal long" z13 zarch vx
+e700001030ea vfchedbs VRR_VVV "vector fp compare high or equal long" z13 zarch vx
+e700000830ea wfchedb VRR_VVV "vector fp compare high or equal long" z13 zarch vx
+e700001830ea wfchedbs VRR_VVV "vector fp compare high or equal long" z13 zarch vx
e700000000c3 vcdg VRR_VV0UUU "vector fp convert from fixed 64 bit" z13 zarch vx
e700000030c3 vcdgb VRR_VV0UU "vector fp convert from fixed 64 bit" z13 zarch vx
e700000830c3 wcdgb VRR_VV0UU8 "vector fp convert from fixed 64 bit" z13 zarch vx
@@ -1618,41 +1618,41 @@ e700000000c0 vclgd VRR_VV0UUU "vector fp
e700000030c0 vclgdb VRR_VV0UU "vector fp convert to logical 64 bit" z13 zarch vx
e700000830c0 wclgdb VRR_VV0UU8 "vector fp convert to logical 64 bit" z13 zarch vx
e700000000e5 vfd VRR_VVV0UU "vector fp divide" z13 zarch vx
-e700000030e5 vfddb VRR_VVV "vector fp divide" z13 zarch vx
-e700000830e5 wfddb VRR_VVV "vector fp divide" z13 zarch vx
+e700000030e5 vfddb VRR_VVV "vector fp divide long" z13 zarch vx
+e700000830e5 wfddb VRR_VVV "vector fp divide long" z13 zarch vx
e700000000c7 vfi VRR_VV0UUU "vector load fp integer" z13 zarch vx
-e700000030c7 vfidb VRR_VV0UU "vector load fp integer" z13 zarch vx
-e700000830c7 wfidb VRR_VV0UU8 "vector load fp integer" z13 zarch vx
+e700000030c7 vfidb VRR_VV0UU "vector load fp integer long" z13 zarch vx
+e700000830c7 wfidb VRR_VV0UU8 "vector load fp integer long" z13 zarch vx
e700000000c4 vlde VRR_VV0UU2 "vector fp load lengthened" z13 zarch vx
-e700000020c4 vldeb VRR_VV "vector fp load lengthened" z13 zarch vx
-e700000820c4 wldeb VRR_VV "vector fp load lengthened" z13 zarch vx
+e700000020c4 vldeb VRR_VV "vector fp load lengthened short to long" z13 zarch vx
+e700000820c4 wldeb VRR_VV "vector fp load lengthened short to long" z13 zarch vx
e700000000c5 vled VRR_VV0UUU "vector fp load rounded" z13 zarch vx
-e700000030c5 vledb VRR_VV0UU "vector fp load rounded" z13 zarch vx
-e700000830c5 wledb VRR_VV0UU8 "vector fp load rounded" z13 zarch vx
+e700000030c5 vledb VRR_VV0UU "vector fp load rounded long to short" z13 zarch vx
+e700000830c5 wledb VRR_VV0UU8 "vector fp load rounded long to short" z13 zarch vx
e700000000e7 vfm VRR_VVV0UU "vector fp multiply" z13 zarch vx
-e700000030e7 vfmdb VRR_VVV "vector fp multiply" z13 zarch vx
-e700000830e7 wfmdb VRR_VVV "vector fp multiply" z13 zarch vx
+e700000030e7 vfmdb VRR_VVV "vector fp multiply long" z13 zarch vx
+e700000830e7 wfmdb VRR_VVV "vector fp multiply long" z13 zarch vx
e7000000008f vfma VRR_VVVU0UV "vector fp multiply and add" z13 zarch vx
-e7000300008f vfmadb VRR_VVVV "vector fp multiply and add" z13 zarch vx
-e7000308008f wfmadb VRR_VVVV "vector fp multiply and add" z13 zarch vx
+e7000300008f vfmadb VRR_VVVV "vector fp multiply and add long" z13 zarch vx
+e7000308008f wfmadb VRR_VVVV "vector fp multiply and add long" z13 zarch vx
e7000000008e vfms VRR_VVVU0UV "vector fp multiply and subtract" z13 zarch vx
-e7000300008e vfmsdb VRR_VVVV "vector fp multiply and subtract" z13 zarch vx
-e7000308008e wfmsdb VRR_VVVV "vector fp multiply and subtract" z13 zarch vx
+e7000300008e vfmsdb VRR_VVVV "vector fp multiply and subtract long" z13 zarch vx
+e7000308008e wfmsdb VRR_VVVV "vector fp multiply and subtract long" z13 zarch vx
e700000000cc vfpso VRR_VV0UUU "vector fp perform sign operation" z13 zarch vx
-e700000030cc vfpsodb VRR_VV0U2 "vector fp perform sign operation" z13 zarch vx
-e700000830cc wfpsodb VRR_VV0U2 "vector fp perform sign operation" z13 zarch vx
-e700000030cc vflcdb VRR_VV "vector fp perform sign operation" z13 zarch vx
-e700000830cc wflcdb VRR_VV "vector fp perform sign operation" z13 zarch vx
-e700001030cc vflndb VRR_VV "vector fp perform sign operation" z13 zarch vx
-e700001830cc wflndb VRR_VV "vector fp perform sign operation" z13 zarch vx
-e700002030cc vflpdb VRR_VV "vector fp perform sign operation" z13 zarch vx
-e700002830cc wflpdb VRR_VV "vector fp perform sign operation" z13 zarch vx
+e700000030cc vfpsodb VRR_VV0U2 "vector fp perform sign operation long" z13 zarch vx
+e700000830cc wfpsodb VRR_VV0U2 "vector fp perform sign operation long" z13 zarch vx
+e700000030cc vflcdb VRR_VV "vector fp perform sign operation long" z13 zarch vx
+e700000830cc wflcdb VRR_VV "vector fp perform sign operation long" z13 zarch vx
+e700001030cc vflndb VRR_VV "vector fp perform sign operation long" z13 zarch vx
+e700001830cc wflndb VRR_VV "vector fp perform sign operation long" z13 zarch vx
+e700002030cc vflpdb VRR_VV "vector fp perform sign operation long" z13 zarch vx
+e700002830cc wflpdb VRR_VV "vector fp perform sign operation long" z13 zarch vx
e700000000ce vfsq VRR_VV0UU2 "vector fp square root" z13 zarch vx
-e700000030ce vfsqdb VRR_VV "vector fp square root" z13 zarch vx
-e700000830ce wfsqdb VRR_VV "vector fp square root" z13 zarch vx
+e700000030ce vfsqdb VRR_VV "vector fp square root long" z13 zarch vx
+e700000830ce wfsqdb VRR_VV "vector fp square root long" z13 zarch vx
e700000000e2 vfs VRR_VVV0UU "vector fp subtract" z13 zarch vx
-e700000030e2 vfsdb VRR_VVV "vector fp subtract" z13 zarch vx
-e700000830e2 wfsdb VRR_VVV "vector fp subtract" z13 zarch vx
+e700000030e2 vfsdb VRR_VVV "vector fp subtract long" z13 zarch vx
+e700000830e2 wfsdb VRR_VVV "vector fp subtract long" z13 zarch vx
e7000000004a vftci VRI_VVUUU "vector fp test data class immediate" z13 zarch vx
e7000000304a vftcidb VRI_VVU2 "vector fp test data class immediate" z13 zarch vx
e7000008304a wftcidb VRI_VVU2 "vector fp test data class immediate" z13 zarch vx
@@ -1679,3 +1679,200 @@ e3000000003a llzrgf RXY_RRRD "load logic
e3000000003b lzrf RXY_RRRD "load and zero rightmost byte 32->32" z13 zarch
e3000000002a lzrg RXY_RRRD "load and zero rightmost byte 64->64" z13 zarch
b93c ppno RRE_RR "perform pseudorandom number operation" z13 zarch
+
+# arch12 instructions
+
+# Vector Enhancements Facility 1
+
+e70000000085 vbperm VRR_VVV "vector bit permute" arch12 zarch vx2
+e70000006004 vllezlf VRX_VRRD "vector load logical word element and zero - left aligned" arch12 zarch vx2
+e700000000b8 vmsl VRR_VVVUU0V "vector multiply sum logical" arch12 zarch vx2
+e700030000b8 vmslg VRR_VVVU0VB "vector multiply sum logical double word" arch12 zarch vx2
+e7000000006c vnx VRR_VVV "vector not exclusive or" arch12 zarch vx2
+e7000000006e vnn VRR_VVV "vector nand" arch12 zarch vx2
+e7000000006f voc VRR_VVV "vector or with complement" arch12 zarch vx2
+e70000000050 vpopctb VRR_VV "vector population count byte" arch12 zarch vx2
+e70000001050 vpopcth VRR_VV "vector population count halfword" arch12 zarch vx2
+e70000002050 vpopctf VRR_VV "vector population count word" arch12 zarch vx2
+e70000003050 vpopctg VRR_VV "vector population count double word" arch12 zarch vx2
+e700000020e3 vfasb VRR_VVV "vector fp add short" arch12 zarch vx2
+e700000820e3 wfasb VRR_VVV "scalar vector fp add scalar short" arch12 zarch vx2
+e700000840e3 wfaxb VRR_VVV "scalar vector fp add scalar extended" arch12 zarch vx2
+e700000020cb wfcsb VRR_VV "scalar vector fp compare scalar short" arch12 zarch vx2
+e700000040cb wfcxb VRR_VV "scalar vector fp compare scalar extended" arch12 zarch vx2
+e700000020ca wfksb VRR_VV "scalar vector fp compare and signal scalar short" arch12 zarch vx2
+e700000040ca wfkxb VRR_VV "scalar vector fp compare and signal scalar extended" arch12 zarch vx2
+
+e700000020e8 vfcesb VRR_VVV "vector fp compare equal short" arch12 zarch vx2
+e700001020e8 vfcesbs VRR_VVV "vector fp compare equal short" arch12 zarch vx2
+e700000820e8 wfcesb VRR_VVV "scalar vector fp compare equal scalar short" arch12 zarch vx2
+e700001820e8 wfcesbs VRR_VVV "scalar vector fp compare equal scalar short" arch12 zarch vx2
+e700000840e8 wfcexb VRR_VVV "scalar vector fp compare equal scalar extended" arch12 zarch vx2
+e700001840e8 wfcexbs VRR_VVV "scalar vector fp compare equal scalar extended" arch12 zarch vx2
+
+e700000420e8 vfkesb VRR_VVV "vector fp compare and signal equal short" arch12 zarch vx2
+e700001420e8 vfkesbs VRR_VVV "vector fp compare and signal equal short" arch12 zarch vx2
+e700000c20e8 wfkesb VRR_VVV "scalar vector fp compare and signal equal scalar short" arch12 zarch vx2
+e700001c20e8 wfkesbs VRR_VVV "scalar vector fp compare and signal equal scalar short" arch12 zarch vx2
+e700000430e8 vfkedb VRR_VVV "vector fp compare and signal equal long" arch12 zarch vx2
+e700001430e8 vfkedbs VRR_VVV "vector fp compare and signal equal long" arch12 zarch vx2
+e700000c30e8 wfkedb VRR_VVV "vector fp compare and signal equal long" arch12 zarch vx2
+e700001c30e8 wfkedbs VRR_VVV "vector fp compare and signal equal long" arch12 zarch vx2
+e700000c40e8 wfkexb VRR_VVV "scalar vector fp compare and signal equal scalar extended" arch12 zarch vx2
+e700001c40e8 wfkexbs VRR_VVV "scalar vector fp compare and signal equal scalar extended" arch12 zarch vx2
+
+e700000020eb vfchsb VRR_VVV "vector fp compare high short" arch12 zarch vx2
+e700001020eb vfchsbs VRR_VVV "vector fp compare high short" arch12 zarch vx2
+e700000820eb wfchsb VRR_VVV "scalar vector fp compare high scalar short" arch12 zarch vx2
+e700001820eb wfchsbs VRR_VVV "scalar vector fp compare high scalar short" arch12 zarch vx2
+e700000840eb wfchxb VRR_VVV "scalar vector fp compare high scalar extended" arch12 zarch vx2
+e700001840eb wfchxbs VRR_VVV "scalar vector fp compare high scalar extended" arch12 zarch vx2
+
+e700000420eb vfkhsb VRR_VVV "vector fp compare and signal high short" arch12 zarch vx2
+e700001420eb vfkhsbs VRR_VVV "vector fp compare and signal high short" arch12 zarch vx2
+e700000c20eb wfkhsb VRR_VVV "scalar vector fp compare and signal high scalar short" arch12 zarch vx2
+e700001c20eb wfkhsbs VRR_VVV "scalar vector fp compare and signal high scalar short" arch12 zarch vx2
+e700000430eb vfkhdb VRR_VVV "vector fp compare and signal high long" arch12 zarch vx2
+e700001430eb vfkhdbs VRR_VVV "vector fp compare and signal high long" arch12 zarch vx2
+e700000c30eb wfkhdb VRR_VVV "vector fp compare and signal high long" arch12 zarch vx2
+e700001c30eb wfkhdbs VRR_VVV "vector fp compare and signal high long" arch12 zarch vx2
+e700000c40eb wfkhxb VRR_VVV "scalar vector fp compare and signal high scalar extended" arch12 zarch vx2
+e700001c40eb wfkhxbs VRR_VVV "scalar vector fp compare and signal high scalar extended" arch12 zarch vx2
+
+e700000020ea vfchesb VRR_VVV "vector fp compare high or equal short" arch12 zarch vx2
+e700001020ea vfchesbs VRR_VVV "vector fp compare high or equal short" arch12 zarch vx2
+e700000820ea wfchesb VRR_VVV "scalar vector fp compare high or equal scalar short" arch12 zarch vx2
+e700001820ea wfchesbs VRR_VVV "scalar vector fp compare high or equal scalar short" arch12 zarch vx2
+e700000840ea wfchexb VRR_VVV "scalar vector fp compare high or equal scalar extended" arch12 zarch vx2
+e700001840ea wfchexbs VRR_VVV "scalar vector fp compare high or equal scalar extended" arch12 zarch vx2
+
+e700000420ea vfkhesb VRR_VVV "vector fp compare and signal high or equal short" arch12 zarch vx2
+e700001420ea vfkhesbs VRR_VVV "vector fp compare and signal high or equal short" arch12 zarch vx2
+e700000c20ea wfkhesb VRR_VVV "scalar vector fp compare and signal high or equal scalar short" arch12 zarch vx2
+e700001c20ea wfkhesbs VRR_VVV "scalar vector fp compare and signal high or equal scalar short" arch12 zarch vx2
+e700000430ea vfkhedb VRR_VVV "vector fp compare and signal high or equal long" arch12 zarch vx2
+e700001430ea vfkhedbs VRR_VVV "vector fp compare and signal high or equal long" arch12 zarch vx2
+e700000c30ea wfkhedb VRR_VVV "vector fp compare and signal high or equal long" arch12 zarch vx2
+e700001c30ea wfkhedbs VRR_VVV "vector fp compare and signal high or equal long" arch12 zarch vx2
+e700000c40ea wfkhexb VRR_VVV "scalar vector fp compare and signal high or equal scalar extended" arch12 zarch vx2
+e700001c40ea wfkhexbs VRR_VVV "scalar vector fp compare and signal high or equal scalar extended" arch12 zarch vx2
+
+e700000020e5 vfdsb VRR_VVV "vector fp divide short" arch12 zarch vx2
+e700000820e5 wfdsb VRR_VVV "scalar vector fp divide scalar short" arch12 zarch vx2
+e700000840e5 wfdxb VRR_VVV "scalar vector fp divide scalar extended" arch12 zarch vx2
+e700000020c7 vfisb VRR_VV0UU "vector load fp integer short" arch12 zarch vx2
+e700000820c7 wfisb VRR_VV0UU8 "scalar vector load fp integer scalar short" arch12 zarch vx2
+e700000840c7 wfixb VRR_VV0UU8 "scalar vector load fp integer scalar extended" arch12 zarch vx2
+e700000000c4 vfll VRR_VV0UU2 "vector fp load lengthened" arch12 zarch vx2
+e700000020c4 vflls VRR_VV "vector fp load lengthened" arch12 zarch vx2
+e700000820c4 wflls VRR_VV "scalar vector fp load lengthened short" arch12 zarch vx2
+e700000830c4 wflld VRR_VV "scalar vector fp load lengthened long" arch12 zarch vx2
+e700000000c5 vflr VRR_VV0UUU "vector fp load rounded" arch12 zarch vx2
+e700000030c5 vflrd VRR_VV0UU "vector fp load rounded long" arch12 zarch vx2
+e700000830c5 wflrd VRR_VV0UU8 "scalar vector fp load rounded long" arch12 zarch vx2
+e700000840c5 wflrx VRR_VV0UU8 "scalar vector fp load rounded extended" arch12 zarch vx2
+e700000000ef vfmax VRR_VVV0UUU "vector fp maximum" arch12 zarch vx2
+e700000020ef vfmaxsb VRR_VVV0U0 "vector fp maximum short" arch12 zarch vx2
+e700000030ef vfmaxdb VRR_VVV0U0 "vector fp maximum long" arch12 zarch vx2
+e700000820ef wfmaxsb VRR_VVV0U0 "scalar fp maximum scalar short" arch12 zarch vx2
+e700000830ef wfmaxdb VRR_VVV0U0 "scalar fp maximum scalar long" arch12 zarch vx2
+e700000840ef wfmaxxb VRR_VVV0U0 "scalar fp maximum scalar extended" arch12 zarch vx2
+e700000000ee vfmin VRR_VVV0UUU "vector fp minimum" arch12 zarch vx2
+e700000020ee vfminsb VRR_VVV0U0 "vector fp minimum short" arch12 zarch vx2
+e700000030ee vfmindb VRR_VVV0U0 "vector fp minimum long" arch12 zarch vx2
+e700000820ee wfminsb VRR_VVV0U0 "scalar fp minimum scalar short" arch12 zarch vx2
+e700000830ee wfmindb VRR_VVV0U0 "scalar fp minimum scalar long" arch12 zarch vx2
+e700000840ee wfminxb VRR_VVV0U0 "scalar fp minimum scalar extended" arch12 zarch vx2
+e700000020e7 vfmsb VRR_VVV "vector fp multiply short" arch12 zarch vx2
+e700000820e7 wfmsb VRR_VVV "scalar vector fp multiply scalar short" arch12 zarch vx2
+e700000840e7 wfmxb VRR_VVV "scalar vector fp multiply scalar extended" arch12 zarch vx2
+e7000200008f vfmasb VRR_VVVV "vector fp multiply and add short" arch12 zarch vx2
+e7000208008f wfmasb VRR_VVVV "scalar vector fp multiply and add scalar short" arch12 zarch vx2
+e7000408008f wfmaxb VRR_VVVV "scalar vector fp multiply and add scalar extended" arch12 zarch vx2
+e7000200008e vfmssb VRR_VVVV "vector fp multiply and subtract short" arch12 zarch vx2
+e7000208008e wfmssb VRR_VVVV "scalar vector fp multiply and subtract scalar short" arch12 zarch vx2
+e7000408008e wfmsxb VRR_VVVV "scalar vector fp multiply and subtract scalar extended" arch12 zarch vx2
+e7000000009f vfnma VRR_VVVU0UV "vector fp negative multiply and add" arch12 zarch vx2
+e7000200009f vfnmasb VRR_VVVV "vector fp negative multiply and add short" arch12 zarch vx2
+e7000208009f wfnmasb VRR_VVVV "scalar vector fp negative multiply and add scalar short" arch12 zarch vx2
+e7000300009f vfnmadb VRR_VVVV "vector fp negative multiply and add long" arch12 zarch vx2
+e7000308009f wfnmadb VRR_VVVV "scalar vector fp negative multiply and add scalar long" arch12 zarch vx2
+e7000408009f wfnmaxb VRR_VVVV "scalar vector fp negative multiply and add scalar extended" arch12 zarch vx2
+e7000000009e vfnms VRR_VVVU0UV "vector fp negative multiply and subtract" arch12 zarch vx2
+e7000200009e vfnmssb VRR_VVVV "vector fp negative multiply and subtract short" arch12 zarch vx2
+e7000208009e wfnmssb VRR_VVVV "scalar vector fp negative multiply and subtract scalar short" arch12 zarch vx2
+e7000300009e vfnmsdb VRR_VVVV "vector fp negative multiply and subtract long" arch12 zarch vx2
+e7000308009e wfnmsdb VRR_VVVV "scalar vector fp negative multiply and subtract scalar long" arch12 zarch vx2
+e7000408009e wfnmsxb VRR_VVVV "scalar vector fp negative multiply and subtract scalar extended" arch12 zarch vx2
+e700000020cc vfpsosb VRR_VV0U2 "vector fp perform sign operation short" arch12 zarch vx2
+e700000820cc wfpsosb VRR_VV0U2 "scalar vector fp perform sign operation scalar short" arch12 zarch vx2
+e700000020cc vflcsb VRR_VV "vector fp perform sign operation short" arch12 zarch vx2
+e700000820cc wflcsb VRR_VV "scalar vector fp perform sign operation scalar short" arch12 zarch vx2
+e700001020cc vflnsb VRR_VV "vector fp perform sign operation short" arch12 zarch vx2
+e700001820cc wflnsb VRR_VV "scalar vector fp perform sign operation scalar short" arch12 zarch vx2
+e700002020cc vflpsb VRR_VV "vector fp perform sign operation short" arch12 zarch vx2
+e700002820cc wflpsb VRR_VV "scalar vector fp perform sign operation scalar short" arch12 zarch vx2
+e700000840cc wfpsoxb VRR_VV0U2 "scalar vector fp perform sign operation scalar extended" arch12 zarch vx2
+e700000840cc wflcxb VRR_VV "scalar vector fp perform sign operation scalar extended" arch12 zarch vx2
+e700001840cc wflnxb VRR_VV "scalar vector fp perform sign operation scalar extended" arch12 zarch vx2
+e700002840cc wflpxb VRR_VV "scalar vector fp perform sign operation scalar extended" arch12 zarch vx2
+e700000020ce vfsqsb VRR_VV "vector fp square root short" arch12 zarch vx2
+e700000820ce wfsqsb VRR_VV "scalar vector fp square root scalar short" arch12 zarch vx2
+e700000840ce wfsqxb VRR_VV "scalar vector fp square root scalar extended" arch12 zarch vx2
+e700000020e2 vfssb VRR_VVV "vector fp subtract short" arch12 zarch vx2
+e700000820e2 wfssb VRR_VVV "scalar vector fp subtract scalar short" arch12 zarch vx2
+e700000840e2 wfsxb VRR_VVV "scalar vector fp subtract scalar extended" arch12 zarch vx2
+e7000000204a vftcisb VRI_VVU2 "vector fp test data class immediate short" arch12 zarch vx2
+e7000008204a wftcisb VRI_VVU2 "scalar vector fp test data class immediate scalar short" arch12 zarch vx2
+e7000008404a wftcixb VRI_VVU2 "scalar vector fp test data class immediate scalar extended" arch12 zarch vx2
+
+# Miscellaneous Instruction Extensions Facility 2
+
+e30000000038 agh RXY_RRRD "add halfword to 64 bit value" arch12 zarch
+e30000000047 bic RXY_URRD "branch indirect on condition" arch12 zarch
+e3f000000047 bi RXY_0RRD "unconditional indirect branch" arch12 zarch
+e30000000047 bi*8 RXY_0RRD "branch indirect on condition" arch12 zarch
+b9ec mgrk RRF_R0RR2 "multiply 64x64reg -> 128" arch12 zarch
+e30000000084 mg RXY_RRRD "multiply 64x64mem -> 128" arch12 zarch
+e3000000003c mgh RXY_RRRD "multiply halfword 64x16mem -> 64" arch12 zarch
+b9fd msrkc RRF_R0RR2 "multiply single 32x32 -> 32" arch12 zarch
+b9ed msgrkc RRF_R0RR2 "multiply single 64x64 -> 64" arch12 zarch
+e30000000053 msc RXY_RRRD "multiply single 32x32mem -> 32" arch12 zarch
+e30000000083 msgc RXY_RRRD "multiply single 64x64mem -> 64" arch12 zarch
+e30000000039 sgh RXY_RRRD "subtract halfword from 64 bit value" arch12 zarch
+
+# Vector packed decimal facility
+
+e60000000037 vlrlr VRS_RRDV "vector load rightmost with length" arch12 zarch vx2
+e60000000035 vlrl VSI_URDV "vector load rightmost with immediate length" arch12 zarch vx2
+e6000000003f vstrlr VRS_RRDV "vector store rightmost with length" arch12 zarch vx2
+e6000000003d vstrl VSI_URDV "vector store rightmost with immediate length" arch12 zarch vx2
+e60000000071 vap VRI_VVV0UU2 "vector add decimal" arch12 zarch vx2
+e60000000077 vcp VRR_0VV0U "vector compare decimal" arch12 zarch vx2
+e60000000050 vcvb VRR_RV0U "vector convert to binary 32 bit" arch12 zarch vx2
+e60000000052 vcvbg VRR_RV0U "vector convert to binary 64 bit" arch12 zarch vx2
+e60000000058 vcvd VRI_VR0UU "vector convert to decimal 32 bit" arch12 zarch vx2
+e6000000005a vcvdg VRI_VR0UU "vector convert to decimal 64 bit" arch12 zarch vx2
+e6000000007a vdp VRI_VVV0UU2 "vector divide decimal" arch12 zarch vx2
+e60000000049 vlip VRI_V0UU2 "vector load immediate decimal" arch12 zarch vx2
+e60000000078 vmp VRI_VVV0UU2 "vector multiply decimal" arch12 zarch vx2
+e60000000079 vmsp VRI_VVV0UU2 "vector multiply and shift decimal" arch12 zarch vx2
+e60000000034 vpkz VSI_URDV "vector pack zoned" arch12 zarch vx2
+e6000000005b vpsop VRI_VVUUU2 "vector perform sign operation decimal" arch12 zarch vx2
+e6000000007b vrp VRI_VVV0UU2 "vector remainder decimal" arch12 zarch vx2
+e6000000007e vsdp VRI_VVV0UU2 "vector shift and divide decimal" arch12 zarch vx2
+e60000000059 vsrp VRI_VVUUU2 "vector shift and round decimal" arch12 zarch vx2
+e60000000073 vsp VRI_VVV0UU2 "vector subtract decimal" arch12 zarch vx2
+e6000000005f vtp VRR_0V "vector test decimal" arch12 zarch vx2
+e6000000003c vupkz VSI_URDV "vector unpack zoned" arch12 zarch vx2
+
+# Guarded storage facility
+
+e3000000004c lgg RXY_RRRD "load guarded 64 bit" arch12 zarch
+e30000000048 llgfsg RXY_RRRD "load logical and shift guarded 64 bit" arch12 zarch
+e3000000004d lgsc RXY_RRRD "load guarded storage controls" arch12 zarch
+e30000000049 stgsc RXY_RRRD "store guarded storage controls" arch12 zarch
+
+# Message-Security-Assist Extension 8
+
+b929 kma RRF_R0RR "cipher message with galois counter mode" arch12 zarch