---
README.s390 | 33
VEX/priv/guest_s390_cc.c | 687 +
VEX/priv/guest_s390_cc.h | 168
VEX/priv/guest_s390_decoder.c | 1452 ++
VEX/priv/guest_s390_defs.h | 76
VEX/priv/guest_s390_helpers.c | 240
VEX/priv/guest_s390_irgen.c | 9772 +++++++++++++++++++
VEX/priv/guest_s390_priv.h | 661 +
VEX/priv/guest_s390_spechelper.c | 634 +
VEX/priv/guest_s390_toIR.c | 203
VEX/priv/host_s390_amode.c | 240
VEX/priv/host_s390_amode.h | 80
VEX/priv/host_s390_defs.c | 294
VEX/priv/host_s390_defs.h | 72
VEX/priv/host_s390_disasm.c | 452
VEX/priv/host_s390_disasm.h | 86
VEX/priv/host_s390_emit.c | 2376 ++++
VEX/priv/host_s390_emit.h | 279
VEX/priv/host_s390_hreg.c | 158
VEX/priv/host_s390_hreg.h | 62
VEX/priv/host_s390_insn.c | 3727 +++++++
VEX/priv/host_s390_insn.h | 423
VEX/priv/host_s390_isel.c | 2480 ++++
VEX/priv/host_s390_isel.h | 47
VEX/priv/host_s390_wrapper.c | 413
VEX/pub/libvex_guest_s390x.h | 178
VEX/pub/libvex_s390x.h | 59
cachegrind/cg-s390x.c | 73
coregrind/m_dispatch/dispatch-s390x-linux.S | 401
coregrind/m_sigframe/sigframe-s390x-linux.c | 565 +
coregrind/m_syswrap/syscall-s390x-linux.S | 172
coregrind/m_syswrap/syswrap-s390x-linux.c | 1524 ++
include/vki/vki-posixtypes-s390x-linux.h | 77
include/vki/vki-s390x-linux.h | 941 +
include/vki/vki-scnums-s390x-linux.h | 447
memcheck/tests/badjump.stderr.exp-s390x | 25
memcheck/tests/badjump2.stderr.exp-s390x | 6
memcheck/tests/origin5-bz2.stderr.exp-glibc212-s390x | 133
memcheck/tests/supp_unknown.stderr.exp-s390x | 10
none/tests/s390x/Makefile.am | 20
none/tests/s390x/clcle.c | 71
none/tests/s390x/clcle.stderr.exp | 2
none/tests/s390x/clcle.stdout.exp | 45
none/tests/s390x/clcle.vgtest | 1
none/tests/s390x/cvb.c | 104
none/tests/s390x/cvb.stderr.exp | 2
none/tests/s390x/cvb.stdout.exp | 68
none/tests/s390x/cvb.vgtest | 1
none/tests/s390x/cvd.c | 34
none/tests/s390x/cvd.stderr.exp | 2
none/tests/s390x/cvd.stdout.exp | 10
none/tests/s390x/cvd.vgtest | 1
none/tests/s390x/ex_clone.c | 60
none/tests/s390x/ex_clone.stderr.exp | 2
none/tests/s390x/ex_clone.stdout.exp | 2
none/tests/s390x/ex_clone.vgtest | 1
none/tests/s390x/ex_sig.c | 46
none/tests/s390x/ex_sig.stderr.exp | 2
none/tests/s390x/ex_sig.stdout.exp | 1
none/tests/s390x/ex_sig.vgtest | 1
none/tests/s390x/filter_stderr | 4
none/tests/s390x/flogr.c | 68
none/tests/s390x/flogr.stderr.exp | 2
none/tests/s390x/flogr.vgtest | 1
none/tests/s390x/lpr.c | 95
none/tests/s390x/lpr.stderr.exp | 2
none/tests/s390x/lpr.stdout.exp | 27
none/tests/s390x/lpr.vgtest | 1
68 files changed, 30402 insertions(+)
--- valgrind/README.s390
+++ valgrind/README.s390
@@ -0,0 +1,33 @@
+Requirements
+------------
+- You need GCC 3.1 or later to compile the s390 port.
+- A working combination of autotools is required. The following
+ combination is known to work: automake 1.9.6 and autoconf 2.59
+- To run valgrind, a z900 machine or any later model is needed.
+- The long displacement facility must be installed on the host machine.
+
+
+Limitations
+-----------
+- 31-bit client programs are not supported.
+- Hexadecimal floating point is not supported.
+- Decimal floating point is not supported yet.
+- Currently, only memcheck, massif, lackey, and none are supported.
+- helgrind and drd seem to work but are not yet supported.
+- exp-ptrcheck and callgrind are not supported.
+
+
+Recommendations
+---------------
+Applications should be compiled with -fno-builtin to avoid
+false positives due to builtin string operations when running memcheck.
+
+
+Reading Material
+----------------
+(1) Linux for zSeries ELF ABI Supplement
+ http://refspecs.linuxfoundation.org/ELF/zSeries/index.html
+(2) z/Architecture Principles of Operation
+ http://publibfi.boulder.ibm.com/epubs/pdf/dz9zr008.pdf
+(3) z/Architecture Reference Summary
+ http://publibfi.boulder.ibm.com/epubs/pdf/dz9zs006.pdf
--- valgrind/VEX/priv/guest_s390_cc.c
+++ valgrind/VEX/priv/guest_s390_cc.c
@@ -0,0 +1,687 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_cc.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex_guest_s390x.h" /* VexGuestS390XState */
+
+#include "main_globals.h" /* vex_control */
+#include "main_util.h" /* vassert */
+#include "guest_s390_priv.h" /* irsb */
+#include "guest_s390_cc.h"
+
+#if defined(VGA_s390x) /* guard the file */
+
+static UInt s390_calculate_cc(ULong cc_op, ULong cc_dep1, ULong cc_dep2,
+ ULong cc_ndep);
+
+/* Add a statement to the current irsb. */
+static __inline__ void
+stmt(IRStmt *st)
+{
+ addStmtToIRSB(irsb, st);
+}
+
+/* Create an expression node for a 32-bit integer constant */
+static __inline__ IRExpr *
+mkU32(UInt value)
+{
+ return IRExpr_Const(IRConst_U32(value));
+}
+
+/* Create an expression node for a 64-bit integer constant */
+static __inline__ IRExpr *
+mkU64(ULong value)
+{
+ return IRExpr_Const(IRConst_U64(value));
+}
+
+/* Create an expression node for a temporary */
+static __inline__ IRExpr *
+mkexpr(IRTemp tmp)
+{
+ return IRExpr_RdTmp(tmp);
+}
+
+/* Create a unary expression */
+static __inline__ IRExpr *
+unop(IROp kind, IRExpr *op)
+{
+ return IRExpr_Unop(kind, op);
+}
+
+/* Create a binary expression */
+static __inline__ IRExpr *
+binop(IROp kind, IRExpr *op1, IRExpr *op2)
+{
+ return IRExpr_Binop(kind, op1, op2);
+}
+
+
+/* Flags thunk offsets */
+#define S390X_GUEST_OFFSET_CC_OP S390_GUEST_OFFSET(guest_CC_OP)
+#define S390X_GUEST_OFFSET_CC_DEP1 S390_GUEST_OFFSET(guest_CC_DEP1)
+#define S390X_GUEST_OFFSET_CC_DEP2 S390_GUEST_OFFSET(guest_CC_DEP2)
+#define S390X_GUEST_OFFSET_CC_NDEP S390_GUEST_OFFSET(guest_CC_NDEP)
+
+
+/* Build IR to calculate the condition code from flags thunk.
+ Returns an expression of type Ity_I32 */
+IRExpr *
+s390_call_calculate_cc(void)
+{
+ IRExpr **args, *call, *op, *dep1, *dep2, *ndep;
+
+ op = IRExpr_Get(S390X_GUEST_OFFSET_CC_OP, Ity_I64);
+ dep1 = IRExpr_Get(S390X_GUEST_OFFSET_CC_DEP1, Ity_I64);
+ dep2 = IRExpr_Get(S390X_GUEST_OFFSET_CC_DEP2, Ity_I64);
+ ndep = IRExpr_Get(S390X_GUEST_OFFSET_CC_NDEP, Ity_I64);
+
+ args = mkIRExprVec_4(op, dep1, dep2, ndep);
+ call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+ "s390_calculate_cc", &s390_calculate_cc, args);
+
+ /* Exclude OP and NDEP from definedness checking. We're only
+ interested in DEP1 and DEP2. */
+ call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+
+ return call;
+}
+
+/*------------------------------------------------------------*/
+/*--- Build the flags thunk. ---*/
+/*------------------------------------------------------------*/
+
+/* Completely fill the flags thunk. We're always filling all fields.
+ Apparently, that is better for redundant PUT elimination. */
+static void
+s390_cc_thunk_fill(IRExpr *op, IRExpr *dep1, IRExpr *dep2, IRExpr *ndep)
+{
+ UInt op_off, dep1_off, dep2_off, ndep_off;
+
+ op_off = S390X_GUEST_OFFSET_CC_OP;
+ dep1_off = S390X_GUEST_OFFSET_CC_DEP1;
+ dep2_off = S390X_GUEST_OFFSET_CC_DEP2;
+ ndep_off = S390X_GUEST_OFFSET_CC_NDEP;
+
+ stmt(IRStmt_Put(op_off, op));
+ stmt(IRStmt_Put(dep1_off, dep1));
+ stmt(IRStmt_Put(dep2_off, dep2));
+ stmt(IRStmt_Put(ndep_off, ndep));
+}
+
+
+/* Create an expression for V and widen the result to 64 bit. */
+static IRExpr *
+s390_cc_widen(IRTemp v, Bool sign_extend)
+{
+ IRExpr *expr;
+
+ expr = mkexpr(v);
+
+ switch (typeOfIRTemp(irsb->tyenv, v)) {
+ case Ity_I64:
+ break;
+ case Ity_I32:
+ expr = unop(sign_extend ? Iop_32Sto64 : Iop_32Uto64, expr);
+ break;
+ case Ity_I16:
+ expr = unop(sign_extend ? Iop_16Sto64 : Iop_16Uto64, expr);
+ break;
+ case Ity_I8:
+ expr = unop(sign_extend ? Iop_8Sto64 : Iop_8Uto64, expr);
+ break;
+ default:
+ vpanic("s390_cc_widen");
+ }
+
+ return expr;
+}
+
+
+void
+s390_cc_thunk_put0(UInt opc)
+{
+ IRExpr *op, *dep1, *dep2, *ndep;
+
+ op = mkU64(opc);
+ dep1 = mkU64(0);
+ dep2 = mkU64(0);
+ ndep = mkU64(0);
+
+ s390_cc_thunk_fill(op, dep1, dep2, ndep);
+}
+
+
+void
+s390_cc_thunk_put1(UInt opc, IRTemp d1, Bool sign_extend)
+{
+ IRExpr *op, *dep1, *dep2, *ndep;
+
+ op = mkU64(opc);
+ dep1 = s390_cc_widen(d1, sign_extend);
+ dep2 = mkU64(0);
+ ndep = mkU64(0);
+
+ s390_cc_thunk_fill(op, dep1, dep2, ndep);
+}
+
+
+void
+s390_cc_thunk_put2(UInt opc, IRTemp d1, IRTemp d2, Bool sign_extend)
+{
+ IRExpr *op, *dep1, *dep2, *ndep;
+
+ op = mkU64(opc);
+ dep1 = s390_cc_widen(d1, sign_extend);
+ dep2 = s390_cc_widen(d2, sign_extend);
+ ndep = mkU64(0);
+
+ s390_cc_thunk_fill(op, dep1, dep2, ndep);
+}
+
+
+/* memcheck believes that the NDEP field in the flags thunk is always
+ defined. But for some flag computations (e.g. add with carry) that is
+ just not true. We therefore need to convey to memcheck that the value
+ of the ndep field does matter and therefore we make the DEP2 field
+ depend on it:
+
+ DEP2 = original_DEP2 ^ NDEP
+
+ In s390_calculate_cc we exploit that (a^b)^b == a
+ I.e. we xor the DEP2 value with the NDEP value to recover the
+ original_DEP2 value. */
+void
+s390_cc_thunk_put3(UInt opc, IRTemp d1, IRTemp d2, IRTemp nd, Bool sign_extend)
+{
+ IRExpr *op, *dep1, *dep2, *ndep, *dep2x;
+
+ op = mkU64(opc);
+ dep1 = s390_cc_widen(d1, sign_extend);
+ dep2 = s390_cc_widen(d2, sign_extend);
+ ndep = s390_cc_widen(nd, sign_extend);
+
+ dep2x = binop(Iop_Xor64, dep2, ndep);
+
+ s390_cc_thunk_fill(op, dep1, dep2x, ndep);
+}
+
+
+/* Write one floating point value into the flags thunk */
+void
+s390_cc_thunk_put1f(UInt opc, IRTemp d1)
+{
+ IRExpr *op, *dep1, *dep2, *ndep;
+
+ op = mkU64(opc);
+ dep1 = mkexpr(d1);
+ dep2 = mkU64(0);
+ ndep = mkU64(0);
+
+ s390_cc_thunk_fill(op, dep1, dep2, ndep);
+}
+
+
+/* Write a floating point value and an integer into the flags thunk. The
+ integer value is zero-extended first. */
+void
+s390_cc_thunk_putFZ(UInt opc, IRTemp d1, IRTemp d2)
+{
+ IRExpr *op, *dep1, *dep2, *ndep;
+
+ op = mkU64(opc);
+ dep1 = mkexpr(d1);
+ dep2 = s390_cc_widen(d2, False);
+ ndep = mkU64(0);
+
+ s390_cc_thunk_fill(op, dep1, dep2, ndep);
+}
+
+
+/* Write a 128-bit floating point value into the flags thunk. This is
+ done by splitting the value into two 64-bits values. */
+void
+s390_cc_thunk_put1f128(UInt opc, IRTemp d1)
+{
+ IRExpr *op, *hi, *lo, *ndep;
+
+ op = mkU64(opc);
+ hi = unop(Iop_F128HIto64, mkexpr(d1));
+ lo = unop(Iop_F128to64, mkexpr(d1));
+ ndep = mkU64(0);
+
+ s390_cc_thunk_fill(op, hi, lo, ndep);
+}
+
+
+/* Write a 128-bit floating point value and an integer into the flags thunk.
+ The integer value is zero-extended first. */
+void
+s390_cc_thunk_put1f128Z(UInt opc, IRTemp d1, IRTemp nd)
+{
+ IRExpr *op, *hi, *lo, *lox, *ndep;
+
+ op = mkU64(opc);
+ hi = unop(Iop_F128HIto64, mkexpr(d1));
+ lo = unop(Iop_F128to64, mkexpr(d1));
+ ndep = s390_cc_widen(nd, False);
+
+ lox = binop(Iop_Xor64, lo, ndep); /* convey dependency */
+
+ s390_cc_thunk_fill(op, hi, lox, ndep);
+}
+
+
+void
+s390_cc_set(UInt val)
+{
+ s390_cc_thunk_fill(mkU64(S390_CC_OP_SET),
+ mkU64(val), mkU64(0), mkU64(0));
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Compute the condition code. ---*/
+/*------------------------------------------------------------*/
+
+#define S390_CC_FOR_BINARY(opcode,cc_dep1,cc_dep2) \
+({ \
+ __asm__ volatile ( \
+ opcode " %[op1],%[op2]\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw), [op1] "+d"(cc_dep1) \
+ : [op2] "d"(cc_dep2) \
+ : "cc");\
+ psw >> 28; /* cc */ \
+})
+
+#define S390_CC_FOR_TERNARY(opcode,cc_dep1,cc_dep2,cc_ndep) \
+({ \
+ /* Recover the original DEP2 value. See comment near s390_cc_thunk_put3 \
+ for rationale. */ \
+ cc_dep2 = cc_dep2 ^ cc_ndep; \
+ __asm__ volatile ( \
+ opcode " %[op1],%[op2]\n\t" \
+ opcode " %[op1],%[op3]\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw), [op1] "+&d"(cc_dep1) \
+ : [op2] "d"(cc_dep2), [op3] "d"(cc_ndep) \
+ : "cc");\
+ psw >> 28; /* cc */ \
+})
+
+#define S390_CC_FOR_BFP_RESULT(opcode,cc_dep1) \
+({ \
+ __asm__ volatile ( \
+ opcode " 0,%[op]\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [op] "f"(cc_dep1) \
+ : "cc", "f0");\
+ psw >> 28; /* cc */ \
+})
+
+#define S390_CC_FOR_BFP128_RESULT(hi,lo) \
+({ \
+ __asm__ volatile ( \
+ "ldr 4,%[high]\n\t" \
+ "ldr 6,%[low]\n\t" \
+ "ltxbr 0,4\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [high] "f"(hi), [low] "f"(lo) \
+ : "cc", "f0", "f2", "f4", "f6");\
+ psw >> 28; /* cc */ \
+})
+
+#define S390_CC_FOR_BFP_CONVERT(opcode,cc_dep1) \
+({ \
+ __asm__ volatile ( \
+ opcode " 0,0,%[op]\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [op] "f"(cc_dep1) \
+ : "cc", "r0");\
+ psw >> 28; /* cc */ \
+})
+
+#define S390_CC_FOR_BFP128_CONVERT(opcode,hi,lo) \
+({ \
+ __asm__ volatile ( \
+ "ldr 4,%[high]\n\t" \
+ "ldr 6,%[low]\n\t" \
+ opcode " 0,0,4\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [high] "f"(hi), [low] "f"(lo) \
+ : "cc", "r0", "f4", "f6");\
+ psw >> 28; /* cc */ \
+})
+
+#define S390_CC_FOR_BFP_TDC(opcode,cc_dep1,cc_dep2) \
+({ \
+ __asm__ volatile ( \
+ opcode " %[value],%[class]\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [value] "f"(cc_dep1), \
+ [class] "m"(cc_dep2) \
+ : "cc");\
+ psw >> 28; /* cc */ \
+})
+
+#define S390_CC_FOR_BFP128_TDC(cc_dep1,cc_dep2,cc_ndep) \
+({ \
+ /* Recover the original DEP2 value. See comment near s390_cc_thunk_put1f128Z \
+ for rationale. */ \
+ cc_dep2 = cc_dep2 ^ cc_ndep; \
+ __asm__ volatile ( \
+ "ldr 4,%[high]\n\t" \
+ "ldr 6,%[low]\n\t" \
+ "tcxb 4,%[class]\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [high] "f"(cc_dep1), [low] "f"(cc_dep2), \
+ [class] "m"(cc_ndep) \
+ : "cc", "f4", "f6");\
+ psw >> 28; /* cc */ \
+})
+
+
+/* Return the value of the condition code from the supplied thunk parameters.
+ This is not the value of the PSW. It is the value of the 2 CC bits within
+ the PSW. The returned value is thus in the interval [0:3]. */
+static UInt
+s390_calculate_cc(ULong cc_op, ULong cc_dep1, ULong cc_dep2, ULong cc_ndep)
+{
+ UInt psw;
+
+ switch (cc_op) {
+
+ case S390_CC_OP_BITWISE:
+ return S390_CC_FOR_BINARY("ogr", cc_dep1, (ULong)0);
+
+ case S390_CC_OP_SIGNED_COMPARE:
+ return S390_CC_FOR_BINARY("cgr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_UNSIGNED_COMPARE:
+ return S390_CC_FOR_BINARY("clgr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_SIGNED_ADD_64:
+ return S390_CC_FOR_BINARY("agr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_SIGNED_ADD_32:
+ return S390_CC_FOR_BINARY("ar", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_SIGNED_SUB_64:
+ return S390_CC_FOR_BINARY("sgr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_SIGNED_SUB_32:
+ return S390_CC_FOR_BINARY("sr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_UNSIGNED_ADD_64:
+ return S390_CC_FOR_BINARY("algr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_UNSIGNED_ADD_32:
+ return S390_CC_FOR_BINARY("alr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_UNSIGNED_ADDC_64:
+ return S390_CC_FOR_TERNARY("algr", cc_dep1, cc_dep2, cc_ndep);
+
+ case S390_CC_OP_UNSIGNED_ADDC_32:
+ return S390_CC_FOR_TERNARY("alr", cc_dep1, cc_dep2, cc_ndep);
+
+ case S390_CC_OP_UNSIGNED_SUB_64:
+ return S390_CC_FOR_BINARY("slgr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_UNSIGNED_SUB_32:
+ return S390_CC_FOR_BINARY("slr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_UNSIGNED_SUBB_64:
+ return S390_CC_FOR_TERNARY("slgr", cc_dep1, cc_dep2, cc_ndep);
+
+ case S390_CC_OP_UNSIGNED_SUBB_32:
+ return S390_CC_FOR_TERNARY("slr", cc_dep1, cc_dep2, cc_ndep);
+
+ case S390_CC_OP_LOAD_AND_TEST:
+ /* Like signed comparison with 0 */
+ return S390_CC_FOR_BINARY("cgr", cc_dep1, (Long)0);
+
+ case S390_CC_OP_TEST_AND_SET:
+ /* Shift the sign bit into the LSB. Note that the tested value is an
+ 8-bit value which has been zero-extended to 32/64 bit. */
+ return cc_dep1 >> 7;
+
+ case S390_CC_OP_LOAD_POSITIVE_32:
+ __asm__ volatile (
+ "lpr %[result],%[op]\n\t"
+ "ipm %[psw]\n\t" : [psw] "=d"(psw), [result] "=d"(cc_dep1)
+ : [op] "d"(cc_dep1)
+ : "cc");
+ return psw >> 28; /* cc */
+
+ case S390_CC_OP_LOAD_POSITIVE_64:
+ __asm__ volatile (
+ "lpgr %[result],%[op]\n\t"
+ "ipm %[psw]\n\t" : [psw] "=d"(psw), [result] "=d"(cc_dep1)
+ : [op] "d"(cc_dep1)
+ : "cc");
+ return psw >> 28; /* cc */
+
+ case S390_CC_OP_TEST_UNDER_MASK_8: {
+ UChar value = cc_dep1;
+ UChar mask = cc_dep2;
+
+ __asm__ volatile (
+ "bras %%r2,1f\n\t" /* %r2 = address of next insn */
+ "tm %[value],0\n\t" /* this is skipped, then EXecuted */
+ "1: ex %[mask],0(%%r2)\n\t" /* EXecute TM after modifying mask */
+ "ipm %[psw]\n\t" : [psw] "=d"(psw)
+ : [value] "m"(value), [mask] "a"(mask)
+ : "r2", "cc");
+ return psw >> 28; /* cc */
+ }
+
+ case S390_CC_OP_TEST_UNDER_MASK_16: {
+ /* Create a TMLL insn with the mask as given by cc_dep2 */
+ UInt insn = (0xA701 << 16) | cc_dep2;
+ UInt value = cc_dep1;
+
+ __asm__ volatile (
+ "lr 1,%[value]\n\t"
+ "lhi 2,0x10\n\t"
+ "ex 2,%[insn]\n\t"
+ "ipm %[psw]\n\t" : [psw] "=d"(psw)
+ : [value] "d"(value), [insn] "m"(insn)
+ : "r1", "r2", "cc");
+ return psw >> 28; /* cc */
+ }
+
+ case S390_CC_OP_SHIFT_LEFT_32:
+ __asm__ volatile (
+ "sla %[op],0(%[amount])\n\t"
+ "ipm %[psw]\n\t" : [psw] "=d"(psw), [op] "+d"(cc_dep1)
+ : [amount] "a"(cc_dep2)
+ : "cc");
+ return psw >> 28; /* cc */
+
+ case S390_CC_OP_SHIFT_LEFT_64: {
+ Int high = (Int)(cc_dep1 >> 32);
+ Int low = (Int)(cc_dep1 & 0xFFFFFFFF);
+
+ __asm__ volatile (
+ "lr 2,%[high]\n\t"
+ "lr 3,%[low]\n\t"
+ "slda 2,0(%[amount])\n\t"
+ "ipm %[psw]\n\t" : [psw] "=d"(psw), [high] "+d"(high), [low] "+d"(low)
+ : [amount] "a"(cc_dep2)
+ : "cc", "r2", "r3");
+ return psw >> 28; /* cc */
+ }
+
+ case S390_CC_OP_INSERT_CHAR_MASK_32: {
+ Int inserted = 0;
+ Int msb = 0;
+
+ if (cc_dep2 & 1) {
+ inserted |= cc_dep1 & 0xff;
+ msb = 0x80;
+ }
+ if (cc_dep2 & 2) {
+ inserted |= cc_dep1 & 0xff00;
+ msb = 0x8000;
+ }
+ if (cc_dep2 & 4) {
+ inserted |= cc_dep1 & 0xff0000;
+ msb = 0x800000;
+ }
+ if (cc_dep2 & 8) {
+ inserted |= cc_dep1 & 0xff000000;
+ msb = 0x80000000;
+ }
+
+ if (inserted & msb) // MSB is 1
+ return 1;
+ if (inserted > 0)
+ return 2;
+ return 0;
+ }
+
+ case S390_CC_OP_BFP_RESULT_32:
+ return S390_CC_FOR_BFP_RESULT("ltebr", cc_dep1);
+
+ case S390_CC_OP_BFP_RESULT_64:
+ return S390_CC_FOR_BFP_RESULT("ltdbr", cc_dep1);
+
+ case S390_CC_OP_BFP_RESULT_128:
+ return S390_CC_FOR_BFP128_RESULT(cc_dep1, cc_dep2);
+
+ case S390_CC_OP_BFP_32_TO_INT_32:
+ return S390_CC_FOR_BFP_CONVERT("cfebr", cc_dep1);
+
+ case S390_CC_OP_BFP_64_TO_INT_32:
+ return S390_CC_FOR_BFP_CONVERT("cfdbr", cc_dep1);
+
+ case S390_CC_OP_BFP_128_TO_INT_32:
+ return S390_CC_FOR_BFP128_CONVERT("cfxbr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_BFP_32_TO_INT_64:
+ return S390_CC_FOR_BFP_CONVERT("cgebr", cc_dep1);
+
+ case S390_CC_OP_BFP_64_TO_INT_64:
+ return S390_CC_FOR_BFP_CONVERT("cgdbr", cc_dep1);
+
+ case S390_CC_OP_BFP_128_TO_INT_64:
+ return S390_CC_FOR_BFP128_CONVERT("cgxbr", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_BFP_TDC_32:
+ return S390_CC_FOR_BFP_TDC("tceb", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_BFP_TDC_64:
+ return S390_CC_FOR_BFP_TDC("tcdb", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_BFP_TDC_128:
+ return S390_CC_FOR_BFP128_TDC(cc_dep1, cc_dep2, cc_ndep);
+
+ case S390_CC_OP_SET:
+ return cc_dep1;
+
+ default:
+ break;
+ }
+ vpanic("s390_calculate_cc");
+}
+
+
+static UInt
+s390_calculate_icc(ULong op, ULong dep1, ULong dep2)
+{
+ return s390_calculate_cc(op, dep1, dep2, 0 /* unused */);
+}
+
+
+/* Build IR to calculate the internal condition code for a "compare and branch"
+ insn. Returns an expression of type Ity_I32 */
+IRExpr *
+s390_call_calculate_icc(UInt opc, IRTemp op1, IRTemp op2, Bool sign_extend)
+{
+ IRExpr **args, *call, *op, *dep1, *dep2;
+
+ op = mkU64(opc);
+ dep1 = s390_cc_widen(op1, sign_extend);
+ dep2 = s390_cc_widen(op2, sign_extend);
+
+ args = mkIRExprVec_3(op, dep1, dep2);
+ call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+ "s390_calculate_icc", &s390_calculate_icc, args);
+
+ /* Exclude OP from definedness checking. We're only
+ interested in DEP1 and DEP2. */
+ call->Iex.CCall.cee->mcx_mask = (1<<0);
+
+ return call;
+}
+
+
+/* Note that this does *not* return a Boolean value. The result needs to be
+ explicitly tested against zero. */
+static UInt
+s390_calculate_cond(ULong mask, ULong op, ULong dep1, ULong dep2, ULong ndep)
+{
+ UInt cc = s390_calculate_cc(op, dep1, dep2, ndep);
+
+ return ((mask << cc) & 0x8);
+}
+
+
+/* Build IR to calculate the condition code from flags thunk.
+ Returns an expression of type Ity_I32 */
+IRExpr *
+s390_call_calculate_cond(UInt m)
+{
+ IRExpr **args, *call, *op, *dep1, *dep2, *ndep, *mask;
+
+ mask = mkU64(m);
+ op = IRExpr_Get(S390X_GUEST_OFFSET_CC_OP, Ity_I64);
+ dep1 = IRExpr_Get(S390X_GUEST_OFFSET_CC_DEP1, Ity_I64);
+ dep2 = IRExpr_Get(S390X_GUEST_OFFSET_CC_DEP2, Ity_I64);
+ ndep = IRExpr_Get(S390X_GUEST_OFFSET_CC_NDEP, Ity_I64);
+
+ args = mkIRExprVec_5(mask, op, dep1, dep2, ndep);
+ call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+ "s390_calculate_cond", &s390_calculate_cond, args);
+
+ /* Exclude the requested condition, OP and NDEP from definedness
+ checking. We're only interested in DEP1 and DEP2. */
+ call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<1) | (1<<4);
+
+ return call;
+}
+
+#endif /* VGA_s390x */
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_cc.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/guest_s390_cc.h
+++ valgrind/VEX/priv/guest_s390_cc.h
@@ -0,0 +1,168 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_cc.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_GUEST_S390_CC_H
+#define __VEX_GUEST_S390_CC_H
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+
+/* The various ways to compute the condition code. */
+
+enum {
+ S390_CC_OP_BITWISE = 0,
+ S390_CC_OP_SIGNED_COMPARE = 1,
+ S390_CC_OP_UNSIGNED_COMPARE = 2,
+ S390_CC_OP_SIGNED_ADD_32 = 3,
+ S390_CC_OP_SIGNED_ADD_64 = 4,
+ S390_CC_OP_UNSIGNED_ADD_32 = 5,
+ S390_CC_OP_UNSIGNED_ADD_64 = 6,
+ S390_CC_OP_UNSIGNED_ADDC_32 = 7,
+ S390_CC_OP_UNSIGNED_ADDC_64 = 8,
+ S390_CC_OP_SIGNED_SUB_32 = 9,
+ S390_CC_OP_SIGNED_SUB_64 = 10,
+ S390_CC_OP_UNSIGNED_SUB_32 = 11,
+ S390_CC_OP_UNSIGNED_SUB_64 = 12,
+ S390_CC_OP_UNSIGNED_SUBB_32 = 13,
+ S390_CC_OP_UNSIGNED_SUBB_64 = 14,
+ S390_CC_OP_LOAD_AND_TEST = 15,
+ S390_CC_OP_LOAD_POSITIVE_32 = 16,
+ S390_CC_OP_LOAD_POSITIVE_64 = 17,
+ S390_CC_OP_TEST_AND_SET = 18,
+ S390_CC_OP_TEST_UNDER_MASK_8 = 19,
+ S390_CC_OP_TEST_UNDER_MASK_16 = 20,
+ S390_CC_OP_SHIFT_LEFT_32 = 21,
+ S390_CC_OP_SHIFT_LEFT_64 = 22,
+ S390_CC_OP_INSERT_CHAR_MASK_32 = 23,
+ S390_CC_OP_BFP_RESULT_32 = 24,
+ S390_CC_OP_BFP_RESULT_64 = 25,
+ S390_CC_OP_BFP_RESULT_128 = 26,
+ S390_CC_OP_BFP_32_TO_INT_32 = 27,
+ S390_CC_OP_BFP_64_TO_INT_32 = 28,
+ S390_CC_OP_BFP_128_TO_INT_32 = 29,
+ S390_CC_OP_BFP_32_TO_INT_64 = 30,
+ S390_CC_OP_BFP_64_TO_INT_64 = 31,
+ S390_CC_OP_BFP_128_TO_INT_64 = 32,
+ S390_CC_OP_BFP_TDC_32 = 33,
+ S390_CC_OP_BFP_TDC_64 = 34,
+ S390_CC_OP_BFP_TDC_128 = 35,
+ S390_CC_OP_SET = 36
+};
+
+/*------------------------------------------------------------*/
+/*--- Thunk layout ---*/
+/*------------------------------------------------------------*/
+
+/*
+ Z -- value is zero extended to 32 / 64 bit
+ S -- value is sign extended to 32 / 64 bit
+ F -- a binary floating point value
+
+ +--------------------------------+-----------------------+----------------------+-------------+
+ | op | cc_dep1 | cc_dep2 | cc_ndep |
+ +--------------------------------+-----------------------+----------------------+-------------+
+ | S390_CC_OP_BITWISE | Z result | | |
+ | S390_CC_OP_SIGNED_COMPARE | S 1st operand | S 2nd operand | |
+ | S390_CC_OP_UNSIGNED_COMPARE | Z 1st operand | Z 2nd operand | |
+ | S390_CC_OP_SIGNED_ADD_32 | S 1st operand | S 2nd operand | |
+ | S390_CC_OP_SIGNED_ADD_64 | S 1st operand | S 2nd operand | |
+ | S390_CC_OP_UNSIGNED_ADD_32 | Z 1st operand | Z 2nd operand | |
+ | S390_CC_OP_UNSIGNED_ADD_64 | Z 1st operand | Z 2nd operand | |
+ | S390_CC_OP_UNSIGNED_ADDC_32 | Z 1st operand | Z 2nd operand | Z carry in |
+ | S390_CC_OP_UNSIGNED_ADDC_64 | Z 1st operand | Z 2nd operand | Z carry in |
+ | S390_CC_OP_SIGNED_SUB_32 | S left operand | S right operand | |
+ | S390_CC_OP_SIGNED_SUB_64 | S left operand | S right operand | |
+ | S390_CC_OP_UNSIGNED_SUB_32 | Z left operand | Z right operand | |
+ | S390_CC_OP_UNSIGNED_SUB_64 | Z left operand | Z right operand | |
+ | S390_CC_OP_UNSIGNED_SUBB_32 | Z left operand | Z right operand | Z borrow in |
+ | S390_CC_OP_UNSIGNED_SUBB_64 | Z left operand | Z right operand | Z borrow in |
+ | S390_CC_OP_LOAD_AND_TEST | S loaded value | | |
+ | S390_CC_OP_LOAD_POSITIVE_32 | S loaded value | | |
+ | S390_CC_OP_LOAD_POSITIVE_64 | S loaded value | | |
+ | S390_CC_OP_TEST_AND_SET | Z tested value | | |
+ | S390_CC_OP_TEST_UNDER_MASK_8 | Z tested value | Z mask | |
+ | S390_CC_OP_TEST_UNDER_MASK_16 | Z tested value | Z mask | |
+ | S390_CC_OP_SHIFT_LEFT_32 | Z value to be shifted | Z shift amount | |
+ | S390_CC_OP_SHIFT_LEFT_64 | Z value to be shifted | Z shift amount | |
+ | S390_CC_OP_INSERT_CHAR_MASK_32 | Z result | Z mask | |
+ | S390_CC_OP_BFP_RESULT_32 | F result | | |
+ | S390_CC_OP_BFP_RESULT_64 | F result | | |
+ | S390_CC_OP_BFP_RESULT_128 | F result hi 64 bits | F result low 64 bits | |
+ | S390_CC_OP_BFP_32_TO_INT_32 | F source | | |
+ | S390_CC_OP_BFP_64_TO_INT_32 | F source | | |
+ | S390_CC_OP_BFP_128_TO_INT_32 | F source hi 64 bits | | |
+ | S390_CC_OP_BFP_32_TO_INT_64 | F source | | |
+ | S390_CC_OP_BFP_64_TO_INT_64 | F source | | |
+ | S390_CC_OP_BFP_128_TO_INT_64 | F source hi 64 bits | | |
+ | S390_CC_OP_BFP_TDC_32 | F value | Z class | |
+ | S390_CC_OP_BFP_TDC_64 | F value | Z class | |
+ | S390_CC_OP_BFP_TDC_128 | F value hi 64 bits | F value low 64 bits | Z class |
+ | S390_CC_OP_SET | Z condition code | | |
+ +--------------------------------+-----------------------+----------------------+-------------+
+*/
+
+/* Functions for writing the flags thunk */
+void s390_cc_thunk_put0(UInt);
+void s390_cc_thunk_put1(UInt, IRTemp, Bool);
+void s390_cc_thunk_put2(UInt, IRTemp, IRTemp, Bool);
+void s390_cc_thunk_put3(UInt, IRTemp, IRTemp, IRTemp, Bool);
+void s390_cc_thunk_put1f(UInt, IRTemp);
+void s390_cc_thunk_putFZ(UInt, IRTemp, IRTemp);
+void s390_cc_thunk_put1f128(UInt, IRTemp);
+void s390_cc_thunk_put1f128Z(UInt, IRTemp, IRTemp);
+
+/* Convenience functions to set the condition code */
+void s390_cc_set(UInt);
+
+/* Create an IR call expression to s390_calculate_cc */
+IRExpr *s390_call_calculate_cc(void);
+
+/* Create an IR call expression to s390_calculate_icc */
+IRExpr *s390_call_calculate_icc(UInt, IRTemp, IRTemp, Bool);
+
+/* Create an IR call expression to s390_calculate_cond */
+IRExpr *s390_call_calculate_cond(UInt);
+
+#define s390_cc_thunk_putZ(op,dep1) s390_cc_thunk_put1(op,dep1,False)
+#define s390_cc_thunk_putS(op,dep1) s390_cc_thunk_put1(op,dep1,True)
+#define s390_cc_thunk_putF(op,dep1) s390_cc_thunk_put1f(op,dep1)
+#define s390_cc_thunk_putZZ(op,dep1,dep2) s390_cc_thunk_put2(op,dep1,dep2,False)
+#define s390_cc_thunk_putSS(op,dep1,dep2) s390_cc_thunk_put2(op,dep1,dep2,True)
+#define s390_cc_thunk_putFF(op,dep1,dep2) s390_cc_thunk_put2f(op,dep1,dep2)
+#define s390_cc_thunk_putZZZ(op,dep1,dep2,ndep) s390_cc_thunk_put3(op,dep1,dep2,ndep,False)
+#define s390_cc_thunk_putSSS(op,dep1,dep2,ndep) s390_cc_thunk_put3(op,dep1,dep2,ndep,True)
+
+#define s390_call_calculate_iccZZ(op,dep1,dep2) s390_call_calculate_icc(op,dep1,dep2,False)
+#define s390_call_calculate_iccSS(op,dep1,dep2) s390_call_calculate_icc(op,dep1,dep2,True)
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_cc.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __VEX_GUEST_S390_CC_H */
--- valgrind/VEX/priv/guest_s390_decoder.c
+++ valgrind/VEX/priv/guest_s390_decoder.c
@@ -0,0 +1,1452 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_decoder.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* The code below only works on a big-endian machine which is
+ asserted in disInstr_S390. */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h" // IRSB (needed by bb_to_IR.h)
+#include "libvex.h" // VexArch (needed by bb_to_IR.h)
+#include "guest_s390_priv.h" // s390_dis_res
+#include "main_util.h" // vassert
+
+/* The possible outcomes of a decoding operation */
+typedef enum {
+ S390_DECODE_OK,                    /* insn decoded; IR was generated */
+ S390_DECODE_UNKNOWN_INSN,          /* opcode not recognised */
+ S390_DECODE_UNIMPLEMENTED_INSN,    /* opcode recognised, but no IR generator yet */
+ S390_DECODE_UNKNOWN_SPECIAL_INSN,  /* unrecognised "special" insn */
+ S390_DECODE_ERROR                  /* internal error while decoding */
+} s390_decode_t;
+
+#include "guest_s390_priv.h"  /* NOTE(review): duplicate -- already included above; harmless with include guards, but should be removed */
+
+/* Byte-pack the instruction-format structures below (no padding) so
+   that their bit-fields overlay the raw instruction image exactly. */
+#pragma pack(1)
+
+
+/* Decode a 2-byte instruction at BYTES and, for implemented opcodes,
+   generate IR for it via the s390_format_* / s390_irgen_* helpers.
+   The bit-field unions below overlay the raw instruction image; this
+   relies on #pragma pack(1) above and on a big-endian host (asserted
+   in disInstr_S390, per the note at the top of this file). */
+static s390_decode_t
+s390_decode_2byte_and_irgen(UChar *bytes)
+{
+ typedef union {
+ struct {
+ unsigned int op : 16;
+ } E;
+ struct {
+ unsigned int op : 8;
+ unsigned int i : 8;
+ } I;
+ struct {
+ unsigned int op : 8;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ } RR;
+ } formats;
+ union {
+ formats fmt;
+ UShort value;
+ } ovl;
+
+ vassert(sizeof(formats) == 2);   /* overlay must exactly cover the 2-byte insn */
+
+ ((char *)(&ovl.value))[0] = bytes[0];   /* copy insn image into the overlay */
+ ((char *)(&ovl.value))[1] = bytes[1];
+
+ switch (ovl.value & 0xffff) {   /* insns identified by their full 16 bits */
+ case 0x0101: /* PR */ goto unimplemented;
+ case 0x0102: /* UPT */ goto unimplemented;
+ case 0x0104: /* PTFF */ goto unimplemented;
+ case 0x0107: /* SCKPF */ goto unimplemented;
+ case 0x010a: /* PFPO */ goto unimplemented;
+ case 0x010b: /* TAM */ goto unimplemented;
+ case 0x010c: /* SAM24 */ goto unimplemented;
+ case 0x010d: /* SAM31 */ goto unimplemented;
+ case 0x010e: /* SAM64 */ goto unimplemented;
+ case 0x01ff: /* TRAP2 */ goto unimplemented;
+ }
+
+ switch ((ovl.value & 0xff00) >> 8) {   /* insns identified by their first byte */
+ case 0x04: /* SPM */ goto unimplemented;
+ case 0x05: /* BALR */ goto unimplemented;
+ case 0x06: s390_format_RR_RR(s390_irgen_BCTR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x07: s390_format_RR(s390_irgen_BCR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x0a: s390_format_I(s390_irgen_SVC, ovl.fmt.I.i); goto ok;
+ case 0x0b: /* BSM */ goto unimplemented;
+ case 0x0c: /* BASSM */ goto unimplemented;
+ case 0x0d: s390_format_RR_RR(s390_irgen_BASR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x0e: /* MVCL */ goto unimplemented;
+ case 0x0f: /* CLCL */ goto unimplemented;
+ case 0x10: s390_format_RR_RR(s390_irgen_LPR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x11: s390_format_RR_RR(s390_irgen_LNR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x12: s390_format_RR_RR(s390_irgen_LTR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x13: s390_format_RR_RR(s390_irgen_LCR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x14: s390_format_RR_RR(s390_irgen_NR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x15: s390_format_RR_RR(s390_irgen_CLR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x16: s390_format_RR_RR(s390_irgen_OR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x17: s390_format_RR_RR(s390_irgen_XR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x18: s390_format_RR_RR(s390_irgen_LR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x19: s390_format_RR_RR(s390_irgen_CR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x1a: s390_format_RR_RR(s390_irgen_AR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x1b: s390_format_RR_RR(s390_irgen_SR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x1c: s390_format_RR_RR(s390_irgen_MR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x1d: s390_format_RR_RR(s390_irgen_DR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x1e: s390_format_RR_RR(s390_irgen_ALR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x1f: s390_format_RR_RR(s390_irgen_SLR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x20: /* LPDR */ goto unimplemented;
+ case 0x21: /* LNDR */ goto unimplemented;
+ case 0x22: /* LTDR */ goto unimplemented;
+ case 0x23: /* LCDR */ goto unimplemented;
+ case 0x24: /* HDR */ goto unimplemented;
+ case 0x25: /* LDXR */ goto unimplemented;
+ case 0x26: /* MXR */ goto unimplemented;
+ case 0x27: /* MXDR */ goto unimplemented;
+ case 0x28: s390_format_RR_FF(s390_irgen_LDR, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x29: /* CDR */ goto unimplemented;
+ case 0x2a: /* ADR */ goto unimplemented;
+ case 0x2b: /* SDR */ goto unimplemented;
+ case 0x2c: /* MDR */ goto unimplemented;
+ case 0x2d: /* DDR */ goto unimplemented;
+ case 0x2e: /* AWR */ goto unimplemented;
+ case 0x2f: /* SWR */ goto unimplemented;
+ case 0x30: /* LPER */ goto unimplemented;
+ case 0x31: /* LNER */ goto unimplemented;
+ case 0x32: /* LTER */ goto unimplemented;
+ case 0x33: /* LCER */ goto unimplemented;
+ case 0x34: /* HER */ goto unimplemented;
+ case 0x35: /* LEDR */ goto unimplemented;
+ case 0x36: /* AXR */ goto unimplemented;
+ case 0x37: /* SXR */ goto unimplemented;
+ case 0x38: s390_format_RR_FF(s390_irgen_LER, ovl.fmt.RR.r1, ovl.fmt.RR.r2); goto ok;
+ case 0x39: /* CER */ goto unimplemented;
+ case 0x3a: /* AER */ goto unimplemented;
+ case 0x3b: /* SER */ goto unimplemented;
+ case 0x3c: /* MDER */ goto unimplemented;
+ case 0x3d: /* DER */ goto unimplemented;
+ case 0x3e: /* AUR */ goto unimplemented;
+ case 0x3f: /* SUR */ goto unimplemented;
+ }
+
+ return S390_DECODE_UNKNOWN_INSN;
+
+ok:   /* insn decoded and IR generated */
+ return S390_DECODE_OK;
+
+unimplemented:   /* opcode recognised, but no IR generator yet */
+ return S390_DECODE_UNIMPLEMENTED_INSN;
+}
+
+static s390_decode_t
+s390_decode_4byte_and_irgen(UChar *bytes)
+{
+ typedef union {
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int op2 : 4;
+ unsigned int i2 : 16;
+ } RI;
+ struct {
+ unsigned int op : 16;
+ unsigned int : 8;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ } RRE;
+ struct {
+ unsigned int op : 16;
+ unsigned int r1 : 4;
+ unsigned int : 4;
+ unsigned int r3 : 4;
+ unsigned int r2 : 4;
+ } RRF;
+ struct {
+ unsigned int op : 16;
+ unsigned int r3 : 4;
+ unsigned int m4 : 4;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ } RRF2;
+ struct {
+ unsigned int op : 16;
+ unsigned int r3 : 4;
+ unsigned int : 4;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ } RRF3;
+ struct {
+ unsigned int op : 16;
+ unsigned int r3 : 4;
+ unsigned int : 4;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ } RRR;
+ struct {
+ unsigned int op : 16;
+ unsigned int r3 : 4;
+ unsigned int : 4;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ } RRF4;
+ struct {
+ unsigned int op : 8;
+ unsigned int r1 : 4;
+ unsigned int r3 : 4;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ } RS;
+ struct {
+ unsigned int op : 8;
+ unsigned int r1 : 4;
+ unsigned int r3 : 4;
+ unsigned int i2 : 16;
+ } RSI;
+ struct {
+ unsigned int op : 8;
+ unsigned int r1 : 4;
+ unsigned int x2 : 4;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ } RX;
+ struct {
+ unsigned int op : 16;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ } S;
+ struct {
+ unsigned int op : 8;
+ unsigned int i2 : 8;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ } SI;
+ } formats;
+ union {
+ formats fmt;
+ UInt value;
+ } ovl;
+
+ vassert(sizeof(formats) == 4);
+
+ ((char *)(&ovl.value))[0] = bytes[0];
+ ((char *)(&ovl.value))[1] = bytes[1];
+ ((char *)(&ovl.value))[2] = bytes[2];
+ ((char *)(&ovl.value))[3] = bytes[3];
+
+ switch ((ovl.value & 0xff0f0000) >> 16) {
+ case 0xa500: s390_format_RI_RU(s390_irgen_IIHH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa501: s390_format_RI_RU(s390_irgen_IIHL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa502: s390_format_RI_RU(s390_irgen_IILH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa503: s390_format_RI_RU(s390_irgen_IILL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa504: s390_format_RI_RU(s390_irgen_NIHH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa505: s390_format_RI_RU(s390_irgen_NIHL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa506: s390_format_RI_RU(s390_irgen_NILH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa507: s390_format_RI_RU(s390_irgen_NILL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa508: s390_format_RI_RU(s390_irgen_OIHH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa509: s390_format_RI_RU(s390_irgen_OIHL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa50a: s390_format_RI_RU(s390_irgen_OILH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa50b: s390_format_RI_RU(s390_irgen_OILL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa50c: s390_format_RI_RU(s390_irgen_LLIHH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa50d: s390_format_RI_RU(s390_irgen_LLIHL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa50e: s390_format_RI_RU(s390_irgen_LLILH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa50f: s390_format_RI_RU(s390_irgen_LLILL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa700: s390_format_RI_RU(s390_irgen_TMLH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa701: s390_format_RI_RU(s390_irgen_TMLL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa702: s390_format_RI_RU(s390_irgen_TMHH, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa703: s390_format_RI_RU(s390_irgen_TMHL, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa704: s390_format_RI(s390_irgen_BRC, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa705: s390_format_RI_RP(s390_irgen_BRAS, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa706: s390_format_RI_RP(s390_irgen_BRCT, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa707: s390_format_RI_RP(s390_irgen_BRCTG, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa708: s390_format_RI_RI(s390_irgen_LHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa709: s390_format_RI_RI(s390_irgen_LGHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa70a: s390_format_RI_RI(s390_irgen_AHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa70b: s390_format_RI_RI(s390_irgen_AGHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa70c: s390_format_RI_RI(s390_irgen_MHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa70d: s390_format_RI_RI(s390_irgen_MGHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa70e: s390_format_RI_RI(s390_irgen_CHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ case 0xa70f: s390_format_RI_RI(s390_irgen_CGHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2); goto ok;
+ }
+
+ switch ((ovl.value & 0xffff0000) >> 16) {
+ case 0x8000: /* SSM */ goto unimplemented;
+ case 0x8200: /* LPSW */ goto unimplemented;
+ case 0x9300: s390_format_S_RD(s390_irgen_TS, ovl.fmt.S.b2, ovl.fmt.S.d2); goto ok;
+ case 0xb202: /* STIDP */ goto unimplemented;
+ case 0xb204: /* SCK */ goto unimplemented;
+ case 0xb205: /* STCK */ goto unimplemented;
+ case 0xb206: /* SCKC */ goto unimplemented;
+ case 0xb207: /* STCKC */ goto unimplemented;
+ case 0xb208: /* SPT */ goto unimplemented;
+ case 0xb209: /* STPT */ goto unimplemented;
+ case 0xb20a: /* SPKA */ goto unimplemented;
+ case 0xb20b: /* IPK */ goto unimplemented;
+ case 0xb20d: /* PTLB */ goto unimplemented;
+ case 0xb210: /* SPX */ goto unimplemented;
+ case 0xb211: /* STPX */ goto unimplemented;
+ case 0xb212: /* STAP */ goto unimplemented;
+ case 0xb214: /* SIE */ goto unimplemented;
+ case 0xb218: /* PC */ goto unimplemented;
+ case 0xb219: /* SAC */ goto unimplemented;
+ case 0xb21a: /* CFC */ goto unimplemented;
+ case 0xb221: /* IPTE */ goto unimplemented;
+ case 0xb222: s390_format_RRE_R0(s390_irgen_IPM, ovl.fmt.RRE.r1); goto ok;
+ case 0xb223: /* IVSK */ goto unimplemented;
+ case 0xb224: /* IAC */ goto unimplemented;
+ case 0xb225: /* SSAR */ goto unimplemented;
+ case 0xb226: /* EPAR */ goto unimplemented;
+ case 0xb227: /* ESAR */ goto unimplemented;
+ case 0xb228: /* PT */ goto unimplemented;
+ case 0xb229: /* ISKE */ goto unimplemented;
+ case 0xb22a: /* RRBE */ goto unimplemented;
+ case 0xb22b: /* SSKE */ goto unimplemented;
+ case 0xb22c: /* TB */ goto unimplemented;
+ case 0xb22d: /* DXR */ goto unimplemented;
+ case 0xb22e: /* PGIN */ goto unimplemented;
+ case 0xb22f: /* PGOUT */ goto unimplemented;
+ case 0xb230: /* CSCH */ goto unimplemented;
+ case 0xb231: /* HSCH */ goto unimplemented;
+ case 0xb232: /* MSCH */ goto unimplemented;
+ case 0xb233: /* SSCH */ goto unimplemented;
+ case 0xb234: /* STSCH */ goto unimplemented;
+ case 0xb235: /* TSCH */ goto unimplemented;
+ case 0xb236: /* TPI */ goto unimplemented;
+ case 0xb237: /* SAL */ goto unimplemented;
+ case 0xb238: /* RSCH */ goto unimplemented;
+ case 0xb239: /* STCRW */ goto unimplemented;
+ case 0xb23a: /* STCPS */ goto unimplemented;
+ case 0xb23b: /* RCHP */ goto unimplemented;
+ case 0xb23c: /* SCHM */ goto unimplemented;
+ case 0xb240: /* BAKR */ goto unimplemented;
+ case 0xb241: /* CKSM */ goto unimplemented;
+ case 0xb244: /* SQDR */ goto unimplemented;
+ case 0xb245: /* SQER */ goto unimplemented;
+ case 0xb246: /* STURA */ goto unimplemented;
+ case 0xb247: /* MSTA */ goto unimplemented;
+ case 0xb248: /* PALB */ goto unimplemented;
+ case 0xb249: /* EREG */ goto unimplemented;
+ case 0xb24a: /* ESTA */ goto unimplemented;
+ case 0xb24b: /* LURA */ goto unimplemented;
+ case 0xb24c: /* TAR */ goto unimplemented;
+ case 0xb24d: s390_format_RRE(s390_irgen_CPYA, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb24e: s390_format_RRE(s390_irgen_SAR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb24f: s390_format_RRE(s390_irgen_EAR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb250: /* CSP */ goto unimplemented;
+ case 0xb252: s390_format_RRE_RR(s390_irgen_MSR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb254: /* MVPG */ goto unimplemented;
+ case 0xb255: s390_format_RRE_RR(s390_irgen_MVST, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb257: /* CUSE */ goto unimplemented;
+ case 0xb258: /* BSG */ goto unimplemented;
+ case 0xb25a: /* BSA */ goto unimplemented;
+ case 0xb25d: s390_format_RRE_RR(s390_irgen_CLST, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb25e: s390_format_RRE_RR(s390_irgen_SRST, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb263: /* CMPSC */ goto unimplemented;
+ case 0xb274: /* SIGA */ goto unimplemented;
+ case 0xb276: /* XSCH */ goto unimplemented;
+ case 0xb277: /* RP */ goto unimplemented;
+ case 0xb278: /* STCKE */ goto unimplemented;
+ case 0xb279: /* SACF */ goto unimplemented;
+ case 0xb27c: /* STCKF */ goto unimplemented;
+ case 0xb27d: /* STSI */ goto unimplemented;
+ case 0xb299: s390_format_S_RD(s390_irgen_SRNM, ovl.fmt.S.b2, ovl.fmt.S.d2); goto ok;
+ case 0xb29c: s390_format_S_RD(s390_irgen_STFPC, ovl.fmt.S.b2, ovl.fmt.S.d2); goto ok;
+ case 0xb29d: s390_format_S_RD(s390_irgen_LFPC, ovl.fmt.S.b2, ovl.fmt.S.d2); goto ok;
+ case 0xb2a5: /* TRE */ goto unimplemented;
+ case 0xb2a6: /* CU21 */ goto unimplemented;
+ case 0xb2a7: /* CU12 */ goto unimplemented;
+ case 0xb2b0: /* STFLE */ goto unimplemented;
+ case 0xb2b1: /* STFL */ goto unimplemented;
+ case 0xb2b2: /* LPSWE */ goto unimplemented;
+ case 0xb2b8: /* SRNMB */ goto unimplemented;
+ case 0xb2b9: /* SRNMT */ goto unimplemented;
+ case 0xb2bd: /* LFAS */ goto unimplemented;
+ case 0xb2ff: /* TRAP4 */ goto unimplemented;
+ case 0xb300: s390_format_RRE_FF(s390_irgen_LPEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb301: s390_format_RRE_FF(s390_irgen_LNEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb302: s390_format_RRE_FF(s390_irgen_LTEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb303: s390_format_RRE_FF(s390_irgen_LCEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb304: s390_format_RRE_FF(s390_irgen_LDEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb305: s390_format_RRE_FF(s390_irgen_LXDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb306: s390_format_RRE_FF(s390_irgen_LXEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb307: /* MXDBR */ goto unimplemented;
+ case 0xb308: /* KEBR */ goto unimplemented;
+ case 0xb309: s390_format_RRE_FF(s390_irgen_CEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb30a: s390_format_RRE_FF(s390_irgen_AEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb30b: s390_format_RRE_FF(s390_irgen_SEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb30c: /* MDEBR */ goto unimplemented;
+ case 0xb30d: s390_format_RRE_FF(s390_irgen_DEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb30e: s390_format_RRF_F0FF(s390_irgen_MAEBR, ovl.fmt.RRF.r1, ovl.fmt.RRF.r3, ovl.fmt.RRF.r2); goto ok;
+ case 0xb30f: s390_format_RRF_F0FF(s390_irgen_MSEBR, ovl.fmt.RRF.r1, ovl.fmt.RRF.r3, ovl.fmt.RRF.r2); goto ok;
+ case 0xb310: s390_format_RRE_FF(s390_irgen_LPDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb311: s390_format_RRE_FF(s390_irgen_LNDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb312: s390_format_RRE_FF(s390_irgen_LTDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb313: s390_format_RRE_FF(s390_irgen_LCDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb314: s390_format_RRE_FF(s390_irgen_SQEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb315: s390_format_RRE_FF(s390_irgen_SQDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb316: s390_format_RRE_FF(s390_irgen_SQXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb317: s390_format_RRE_FF(s390_irgen_MEEBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb318: /* KDBR */ goto unimplemented;
+ case 0xb319: s390_format_RRE_FF(s390_irgen_CDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb31a: s390_format_RRE_FF(s390_irgen_ADBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb31b: s390_format_RRE_FF(s390_irgen_SDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb31c: s390_format_RRE_FF(s390_irgen_MDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb31d: s390_format_RRE_FF(s390_irgen_DDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb31e: s390_format_RRF_F0FF(s390_irgen_MADBR, ovl.fmt.RRF.r1, ovl.fmt.RRF.r3, ovl.fmt.RRF.r2); goto ok;
+ case 0xb31f: s390_format_RRF_F0FF(s390_irgen_MSDBR, ovl.fmt.RRF.r1, ovl.fmt.RRF.r3, ovl.fmt.RRF.r2); goto ok;
+ case 0xb324: /* LDER */ goto unimplemented;
+ case 0xb325: /* LXDR */ goto unimplemented;
+ case 0xb326: /* LXER */ goto unimplemented;
+ case 0xb32e: /* MAER */ goto unimplemented;
+ case 0xb32f: /* MSER */ goto unimplemented;
+ case 0xb336: /* SQXR */ goto unimplemented;
+ case 0xb337: /* MEER */ goto unimplemented;
+ case 0xb338: /* MAYLR */ goto unimplemented;
+ case 0xb339: /* MYLR */ goto unimplemented;
+ case 0xb33a: /* MAYR */ goto unimplemented;
+ case 0xb33b: /* MYR */ goto unimplemented;
+ case 0xb33c: /* MAYHR */ goto unimplemented;
+ case 0xb33d: /* MYHR */ goto unimplemented;
+ case 0xb33e: /* MADR */ goto unimplemented;
+ case 0xb33f: /* MSDR */ goto unimplemented;
+ case 0xb340: s390_format_RRE_FF(s390_irgen_LPXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb341: s390_format_RRE_FF(s390_irgen_LNXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb342: s390_format_RRE_FF(s390_irgen_LTXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb343: s390_format_RRE_FF(s390_irgen_LCXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb344: s390_format_RRE_FF(s390_irgen_LEDBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb345: s390_format_RRE_FF(s390_irgen_LDXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb346: s390_format_RRE_FF(s390_irgen_LEXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb347: /* FIXBR */ goto unimplemented;
+ case 0xb348: /* KXBR */ goto unimplemented;
+ case 0xb349: s390_format_RRE_FF(s390_irgen_CXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb34a: s390_format_RRE_FF(s390_irgen_AXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb34b: s390_format_RRE_FF(s390_irgen_SXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb34c: s390_format_RRE_FF(s390_irgen_MXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb34d: s390_format_RRE_FF(s390_irgen_DXBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb350: /* TBEDR */ goto unimplemented;
+ case 0xb351: /* TBDR */ goto unimplemented;
+ case 0xb353: /* DIEBR */ goto unimplemented;
+ case 0xb357: /* FIEBR */ goto unimplemented;
+ case 0xb358: /* THDER */ goto unimplemented;
+ case 0xb359: /* THDR */ goto unimplemented;
+ case 0xb35b: /* DIDBR */ goto unimplemented;
+ case 0xb35f: /* FIDBR */ goto unimplemented;
+ case 0xb360: /* LPXR */ goto unimplemented;
+ case 0xb361: /* LNXR */ goto unimplemented;
+ case 0xb362: /* LTXR */ goto unimplemented;
+ case 0xb363: /* LCXR */ goto unimplemented;
+ case 0xb365: s390_format_RRE_FF(s390_irgen_LXR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb366: /* LEXR */ goto unimplemented;
+ case 0xb367: /* FIXR */ goto unimplemented;
+ case 0xb369: /* CXR */ goto unimplemented;
+ case 0xb370: s390_format_RRE_FF(s390_irgen_LPDFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb371: s390_format_RRE_FF(s390_irgen_LNDFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb372: s390_format_RRF_F0FF2(s390_irgen_CPSDR, ovl.fmt.RRF3.r3, ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+ case 0xb373: s390_format_RRE_FF(s390_irgen_LCDFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb374: s390_format_RRE_F0(s390_irgen_LZER, ovl.fmt.RRE.r1); goto ok;
+ case 0xb375: s390_format_RRE_F0(s390_irgen_LZDR, ovl.fmt.RRE.r1); goto ok;
+ case 0xb376: s390_format_RRE_F0(s390_irgen_LZXR, ovl.fmt.RRE.r1); goto ok;
+ case 0xb377: /* FIER */ goto unimplemented;
+ case 0xb37f: /* FIDR */ goto unimplemented;
+ case 0xb384: s390_format_RRE_R0(s390_irgen_SFPC, ovl.fmt.RRE.r1); goto ok;
+ case 0xb385: /* SFASR */ goto unimplemented;
+ case 0xb38c: s390_format_RRE_R0(s390_irgen_EFPC, ovl.fmt.RRE.r1); goto ok;
+ case 0xb390: /* CELFBR */ goto unimplemented;
+ case 0xb391: /* CDLFBR */ goto unimplemented;
+ case 0xb392: /* CXLFBR */ goto unimplemented;
+ case 0xb394: s390_format_RRE_FR(s390_irgen_CEFBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb395: s390_format_RRE_FR(s390_irgen_CDFBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb396: s390_format_RRE_FR(s390_irgen_CXFBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb398: s390_format_RRF_U0RF(s390_irgen_CFEBR, ovl.fmt.RRF3.r3, ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+ case 0xb399: s390_format_RRF_U0RF(s390_irgen_CFDBR, ovl.fmt.RRF3.r3, ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+ case 0xb39a: s390_format_RRF_U0RF(s390_irgen_CFXBR, ovl.fmt.RRF3.r3, ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+ case 0xb3a0: /* CELGBR */ goto unimplemented;
+ case 0xb3a1: /* CDLGBR */ goto unimplemented;
+ case 0xb3a2: /* CXLGBR */ goto unimplemented;
+ case 0xb3a4: s390_format_RRE_FR(s390_irgen_CEGBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb3a5: s390_format_RRE_FR(s390_irgen_CDGBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb3a6: s390_format_RRE_FR(s390_irgen_CXGBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb3a8: s390_format_RRF_U0RF(s390_irgen_CGEBR, ovl.fmt.RRF3.r3, ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+ case 0xb3a9: s390_format_RRF_U0RF(s390_irgen_CGDBR, ovl.fmt.RRF3.r3, ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+ case 0xb3aa: s390_format_RRF_U0RF(s390_irgen_CGXBR, ovl.fmt.RRF3.r3, ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+ case 0xb3b4: /* CEFR */ goto unimplemented;
+ case 0xb3b5: /* CDFR */ goto unimplemented;
+ case 0xb3b6: /* CXFR */ goto unimplemented;
+ case 0xb3b8: /* CFER */ goto unimplemented;
+ case 0xb3b9: /* CFDR */ goto unimplemented;
+ case 0xb3ba: /* CFXR */ goto unimplemented;
+ case 0xb3c1: s390_format_RRE_FR(s390_irgen_LDGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb3c4: /* CEGR */ goto unimplemented;
+ case 0xb3c5: /* CDGR */ goto unimplemented;
+ case 0xb3c6: /* CXGR */ goto unimplemented;
+ case 0xb3c8: /* CGER */ goto unimplemented;
+ case 0xb3c9: /* CGDR */ goto unimplemented;
+ case 0xb3ca: /* CGXR */ goto unimplemented;
+ case 0xb3cd: s390_format_RRE_RF(s390_irgen_LGDR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb3d0: /* MDTR */ goto unimplemented;
+ case 0xb3d1: /* DDTR */ goto unimplemented;
+ case 0xb3d2: /* ADTR */ goto unimplemented;
+ case 0xb3d3: /* SDTR */ goto unimplemented;
+ case 0xb3d4: /* LDETR */ goto unimplemented;
+ case 0xb3d5: /* LEDTR */ goto unimplemented;
+ case 0xb3d6: /* LTDTR */ goto unimplemented;
+ case 0xb3d7: /* FIDTR */ goto unimplemented;
+ case 0xb3d8: /* MXTR */ goto unimplemented;
+ case 0xb3d9: /* DXTR */ goto unimplemented;
+ case 0xb3da: /* AXTR */ goto unimplemented;
+ case 0xb3db: /* SXTR */ goto unimplemented;
+ case 0xb3dc: /* LXDTR */ goto unimplemented;
+ case 0xb3dd: /* LDXTR */ goto unimplemented;
+ case 0xb3de: /* LTXTR */ goto unimplemented;
+ case 0xb3df: /* FIXTR */ goto unimplemented;
+ case 0xb3e0: /* KDTR */ goto unimplemented;
+ case 0xb3e1: /* CGDTR */ goto unimplemented;
+ case 0xb3e2: /* CUDTR */ goto unimplemented;
+ case 0xb3e3: /* CSDTR */ goto unimplemented;
+ case 0xb3e4: /* CDTR */ goto unimplemented;
+ case 0xb3e5: /* EEDTR */ goto unimplemented;
+ case 0xb3e7: /* ESDTR */ goto unimplemented;
+ case 0xb3e8: /* KXTR */ goto unimplemented;
+ case 0xb3e9: /* CGXTR */ goto unimplemented;
+ case 0xb3ea: /* CUXTR */ goto unimplemented;
+ case 0xb3eb: /* CSXTR */ goto unimplemented;
+ case 0xb3ec: /* CXTR */ goto unimplemented;
+ case 0xb3ed: /* EEXTR */ goto unimplemented;
+ case 0xb3ef: /* ESXTR */ goto unimplemented;
+ case 0xb3f1: /* CDGTR */ goto unimplemented;
+ case 0xb3f2: /* CDUTR */ goto unimplemented;
+ case 0xb3f3: /* CDSTR */ goto unimplemented;
+ case 0xb3f4: /* CEDTR */ goto unimplemented;
+ case 0xb3f5: /* QADTR */ goto unimplemented;
+ case 0xb3f6: /* IEDTR */ goto unimplemented;
+ case 0xb3f7: /* RRDTR */ goto unimplemented;
+ case 0xb3f9: /* CXGTR */ goto unimplemented;
+ case 0xb3fa: /* CXUTR */ goto unimplemented;
+ case 0xb3fb: /* CXSTR */ goto unimplemented;
+ case 0xb3fc: /* CEXTR */ goto unimplemented;
+ case 0xb3fd: /* QAXTR */ goto unimplemented;
+ case 0xb3fe: /* IEXTR */ goto unimplemented;
+ case 0xb3ff: /* RRXTR */ goto unimplemented;
+ case 0xb900: s390_format_RRE_RR(s390_irgen_LPGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb901: s390_format_RRE_RR(s390_irgen_LNGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb902: s390_format_RRE_RR(s390_irgen_LTGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb903: s390_format_RRE_RR(s390_irgen_LCGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb904: s390_format_RRE_RR(s390_irgen_LGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb905: /* LURAG */ goto unimplemented;
+ case 0xb906: s390_format_RRE_RR(s390_irgen_LGBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb907: s390_format_RRE_RR(s390_irgen_LGHR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb908: s390_format_RRE_RR(s390_irgen_AGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb909: s390_format_RRE_RR(s390_irgen_SGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb90a: s390_format_RRE_RR(s390_irgen_ALGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb90b: s390_format_RRE_RR(s390_irgen_SLGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb90c: s390_format_RRE_RR(s390_irgen_MSGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb90d: s390_format_RRE_RR(s390_irgen_DSGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb90e: /* EREGG */ goto unimplemented;
+ case 0xb90f: s390_format_RRE_RR(s390_irgen_LRVGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb910: s390_format_RRE_RR(s390_irgen_LPGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb911: s390_format_RRE_RR(s390_irgen_LNGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb912: s390_format_RRE_RR(s390_irgen_LTGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb913: s390_format_RRE_RR(s390_irgen_LCGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb914: s390_format_RRE_RR(s390_irgen_LGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb916: s390_format_RRE_RR(s390_irgen_LLGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb917: s390_format_RRE_RR(s390_irgen_LLGTR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb918: s390_format_RRE_RR(s390_irgen_AGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb919: s390_format_RRE_RR(s390_irgen_SGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb91a: s390_format_RRE_RR(s390_irgen_ALGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb91b: s390_format_RRE_RR(s390_irgen_SLGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb91c: s390_format_RRE_RR(s390_irgen_MSGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb91d: s390_format_RRE_RR(s390_irgen_DSGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb91e: /* KMAC */ goto unimplemented;
+ case 0xb91f: s390_format_RRE_RR(s390_irgen_LRVR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb920: s390_format_RRE_RR(s390_irgen_CGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb921: s390_format_RRE_RR(s390_irgen_CLGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb925: /* STURG */ goto unimplemented;
+ case 0xb926: s390_format_RRE_RR(s390_irgen_LBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb927: s390_format_RRE_RR(s390_irgen_LHR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb928: /* PCKMO */ goto unimplemented;
+ case 0xb92b: /* KMO */ goto unimplemented;
+ case 0xb92c: /* PCC */ goto unimplemented;
+ case 0xb92d: /* KMCTR */ goto unimplemented;
+ case 0xb92e: /* KM */ goto unimplemented;
+ case 0xb92f: /* KMC */ goto unimplemented;
+ case 0xb930: s390_format_RRE_RR(s390_irgen_CGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb931: s390_format_RRE_RR(s390_irgen_CLGFR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb93e: /* KIMD */ goto unimplemented;
+ case 0xb93f: /* KLMD */ goto unimplemented;
+ case 0xb941: /* CFDTR */ goto unimplemented;
+ case 0xb942: /* CLGDTR */ goto unimplemented;
+ case 0xb943: /* CLFDTR */ goto unimplemented;
+ case 0xb946: s390_format_RRE_RR(s390_irgen_BCTGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb949: /* CFXTR */ goto unimplemented;
+ case 0xb94a: /* CLGXTR */ goto unimplemented;
+ case 0xb94b: /* CLFXTR */ goto unimplemented;
+ case 0xb951: /* CDFTR */ goto unimplemented;
+ case 0xb952: /* CDLGTR */ goto unimplemented;
+ case 0xb953: /* CDLFTR */ goto unimplemented;
+ case 0xb959: /* CXFTR */ goto unimplemented;
+ case 0xb95a: /* CXLGTR */ goto unimplemented;
+ case 0xb95b: /* CXLFTR */ goto unimplemented;
+ case 0xb960: /* CGRT */ goto unimplemented;
+ case 0xb961: /* CLGRT */ goto unimplemented;
+ case 0xb972: /* CRT */ goto unimplemented;
+ case 0xb973: /* CLRT */ goto unimplemented;
+ case 0xb980: s390_format_RRE_RR(s390_irgen_NGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb981: s390_format_RRE_RR(s390_irgen_OGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb982: s390_format_RRE_RR(s390_irgen_XGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb983: s390_format_RRE_RR(s390_irgen_FLOGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb984: s390_format_RRE_RR(s390_irgen_LLGCR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb985: s390_format_RRE_RR(s390_irgen_LLGHR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb986: s390_format_RRE_RR(s390_irgen_MLGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb987: s390_format_RRE_RR(s390_irgen_DLGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb988: s390_format_RRE_RR(s390_irgen_ALCGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb989: s390_format_RRE_RR(s390_irgen_SLBGR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb98a: /* CSPG */ goto unimplemented;
+ case 0xb98d: /* EPSW */ goto unimplemented;
+ case 0xb98e: /* IDTE */ goto unimplemented;
+ case 0xb990: /* TRTT */ goto unimplemented;
+ case 0xb991: /* TRTO */ goto unimplemented;
+ case 0xb992: /* TROT */ goto unimplemented;
+ case 0xb993: /* TROO */ goto unimplemented;
+ case 0xb994: s390_format_RRE_RR(s390_irgen_LLCR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb995: s390_format_RRE_RR(s390_irgen_LLHR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb996: s390_format_RRE_RR(s390_irgen_MLR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb997: s390_format_RRE_RR(s390_irgen_DLR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb998: s390_format_RRE_RR(s390_irgen_ALCR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb999: s390_format_RRE_RR(s390_irgen_SLBR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb99a: /* EPAIR */ goto unimplemented;
+ case 0xb99b: /* ESAIR */ goto unimplemented;
+ case 0xb99d: /* ESEA */ goto unimplemented;
+ case 0xb99e: /* PTI */ goto unimplemented;
+ case 0xb99f: /* SSAIR */ goto unimplemented;
+ case 0xb9a2: /* PTF */ goto unimplemented;
+ case 0xb9aa: /* LPTEA */ goto unimplemented;
+ case 0xb9ae: /* RRBM */ goto unimplemented;
+ case 0xb9af: /* PFMF */ goto unimplemented;
+ case 0xb9b0: /* CU14 */ goto unimplemented;
+ case 0xb9b1: /* CU24 */ goto unimplemented;
+ case 0xb9b2: /* CU41 */ goto unimplemented;
+ case 0xb9b3: /* CU42 */ goto unimplemented;
+ case 0xb9bd: /* TRTRE */ goto unimplemented;
+ case 0xb9be: /* SRSTU */ goto unimplemented;
+ case 0xb9bf: /* TRTE */ goto unimplemented;
+ case 0xb9c8: s390_format_RRF_R0RR2(s390_irgen_AHHHR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9c9: s390_format_RRF_R0RR2(s390_irgen_SHHHR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9ca: s390_format_RRF_R0RR2(s390_irgen_ALHHHR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9cb: s390_format_RRF_R0RR2(s390_irgen_SLHHHR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9cd: s390_format_RRE_RR(s390_irgen_CHHR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb9cf: s390_format_RRE_RR(s390_irgen_CLHHR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb9d8: s390_format_RRF_R0RR2(s390_irgen_AHHLR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9d9: s390_format_RRF_R0RR2(s390_irgen_SHHLR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9da: s390_format_RRF_R0RR2(s390_irgen_ALHHLR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9db: s390_format_RRF_R0RR2(s390_irgen_SLHHLR, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9dd: s390_format_RRE_RR(s390_irgen_CHLR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb9df: s390_format_RRE_RR(s390_irgen_CLHLR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2); goto ok;
+ case 0xb9e1: /* POPCNT */ goto unimplemented;
+ case 0xb9e2: /* LOCGR */ goto unimplemented;
+ case 0xb9e4: s390_format_RRF_R0RR2(s390_irgen_NGRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9e6: s390_format_RRF_R0RR2(s390_irgen_OGRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9e7: s390_format_RRF_R0RR2(s390_irgen_XGRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9e8: s390_format_RRF_R0RR2(s390_irgen_AGRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9e9: s390_format_RRF_R0RR2(s390_irgen_SGRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9ea: s390_format_RRF_R0RR2(s390_irgen_ALGRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9eb: s390_format_RRF_R0RR2(s390_irgen_SLGRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9f2: /* LOCR */ goto unimplemented;
+ case 0xb9f4: s390_format_RRF_R0RR2(s390_irgen_NRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9f6: s390_format_RRF_R0RR2(s390_irgen_ORK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9f7: s390_format_RRF_R0RR2(s390_irgen_XRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9f8: s390_format_RRF_R0RR2(s390_irgen_ARK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9f9: s390_format_RRF_R0RR2(s390_irgen_SRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9fa: s390_format_RRF_R0RR2(s390_irgen_ALRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ case 0xb9fb: s390_format_RRF_R0RR2(s390_irgen_SLRK, ovl.fmt.RRF4.r3, ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2); goto ok;
+ }
+
+ switch ((ovl.value & 0xff000000) >> 24) {
+ case 0x40: s390_format_RX_RRRD(s390_irgen_STH, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x41: s390_format_RX_RRRD(s390_irgen_LA, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x42: s390_format_RX_RRRD(s390_irgen_STC, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x43: s390_format_RX_RRRD(s390_irgen_IC, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x44: s390_format_RX_RRRD(s390_irgen_EX, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x45: /* BAL */ goto unimplemented;
+ case 0x46: s390_format_RX_RRRD(s390_irgen_BCT, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x47: s390_format_RX(s390_irgen_BC, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x48: s390_format_RX_RRRD(s390_irgen_LH, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x49: s390_format_RX_RRRD(s390_irgen_CH, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x4a: s390_format_RX_RRRD(s390_irgen_AH, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x4b: s390_format_RX_RRRD(s390_irgen_SH, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x4c: s390_format_RX_RRRD(s390_irgen_MH, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x4d: s390_format_RX_RRRD(s390_irgen_BAS, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x4e: s390_format_RX_RRRD(s390_irgen_CVD, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x4f: s390_format_RX_RRRD(s390_irgen_CVB, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x50: s390_format_RX_RRRD(s390_irgen_ST, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x51: s390_format_RX_RRRD(s390_irgen_LAE, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x54: s390_format_RX_RRRD(s390_irgen_N, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x55: s390_format_RX_RRRD(s390_irgen_CL, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x56: s390_format_RX_RRRD(s390_irgen_O, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x57: s390_format_RX_RRRD(s390_irgen_X, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x58: s390_format_RX_RRRD(s390_irgen_L, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x59: s390_format_RX_RRRD(s390_irgen_C, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x5a: s390_format_RX_RRRD(s390_irgen_A, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x5b: s390_format_RX_RRRD(s390_irgen_S, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x5c: s390_format_RX_RRRD(s390_irgen_M, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x5d: s390_format_RX_RRRD(s390_irgen_D, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x5e: s390_format_RX_RRRD(s390_irgen_AL, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x5f: s390_format_RX_RRRD(s390_irgen_SL, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x60: s390_format_RX_FRRD(s390_irgen_STD, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x67: /* MXD */ goto unimplemented;
+ case 0x68: s390_format_RX_FRRD(s390_irgen_LD, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x69: /* CD */ goto unimplemented;
+ case 0x6a: /* AD */ goto unimplemented;
+ case 0x6b: /* SD */ goto unimplemented;
+ case 0x6c: /* MD */ goto unimplemented;
+ case 0x6d: /* DD */ goto unimplemented;
+ case 0x6e: /* AW */ goto unimplemented;
+ case 0x6f: /* SW */ goto unimplemented;
+ case 0x70: s390_format_RX_FRRD(s390_irgen_STE, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x71: s390_format_RX_RRRD(s390_irgen_MS, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x78: s390_format_RX_FRRD(s390_irgen_LE, ovl.fmt.RX.r1, ovl.fmt.RX.x2, ovl.fmt.RX.b2, ovl.fmt.RX.d2); goto ok;
+ case 0x79: /* CE */ goto unimplemented;
+ case 0x7a: /* AE */ goto unimplemented;
+ case 0x7b: /* SE */ goto unimplemented;
+ case 0x7c: /* MDE */ goto unimplemented;
+ case 0x7d: /* DE */ goto unimplemented;
+ case 0x7e: /* AU */ goto unimplemented;
+ case 0x7f: /* SU */ goto unimplemented;
+ case 0x83: /* DIAG */ goto unimplemented;
+ case 0x84: s390_format_RSI_RRP(s390_irgen_BRXH, ovl.fmt.RSI.r1, ovl.fmt.RSI.r3, ovl.fmt.RSI.i2); goto ok;
+ case 0x85: s390_format_RSI_RRP(s390_irgen_BRXLE, ovl.fmt.RSI.r1, ovl.fmt.RSI.r3, ovl.fmt.RSI.i2); goto ok;
+ case 0x86: s390_format_RS_RRRD(s390_irgen_BXH, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x87: s390_format_RS_RRRD(s390_irgen_BXLE, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x88: s390_format_RS_R0RD(s390_irgen_SRL, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x89: s390_format_RS_R0RD(s390_irgen_SLL, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x8a: s390_format_RS_R0RD(s390_irgen_SRA, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x8b: s390_format_RS_R0RD(s390_irgen_SLA, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x8c: s390_format_RS_R0RD(s390_irgen_SRDL, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x8d: s390_format_RS_R0RD(s390_irgen_SLDL, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x8e: s390_format_RS_R0RD(s390_irgen_SRDA, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x8f: s390_format_RS_R0RD(s390_irgen_SLDA, ovl.fmt.RS.r1, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x90: s390_format_RS_RRRD(s390_irgen_STM, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x91: s390_format_SI_URD(s390_irgen_TM, ovl.fmt.SI.i2, ovl.fmt.SI.b1, ovl.fmt.SI.d1); goto ok;
+ case 0x92: s390_format_SI_URD(s390_irgen_MVI, ovl.fmt.SI.i2, ovl.fmt.SI.b1, ovl.fmt.SI.d1); goto ok;
+ case 0x94: s390_format_SI_URD(s390_irgen_NI, ovl.fmt.SI.i2, ovl.fmt.SI.b1, ovl.fmt.SI.d1); goto ok;
+ case 0x95: s390_format_SI_URD(s390_irgen_CLI, ovl.fmt.SI.i2, ovl.fmt.SI.b1, ovl.fmt.SI.d1); goto ok;
+ case 0x96: s390_format_SI_URD(s390_irgen_OI, ovl.fmt.SI.i2, ovl.fmt.SI.b1, ovl.fmt.SI.d1); goto ok;
+ case 0x97: s390_format_SI_URD(s390_irgen_XI, ovl.fmt.SI.i2, ovl.fmt.SI.b1, ovl.fmt.SI.d1); goto ok;
+ case 0x98: s390_format_RS_RRRD(s390_irgen_LM, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x99: /* TRACE */ goto unimplemented;
+ case 0x9a: s390_format_RS_AARD(s390_irgen_LAM, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0x9b: s390_format_RS_AARD(s390_irgen_STAM, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0xa8: s390_format_RS_RRRD(s390_irgen_MVCLE, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0xa9: s390_format_RS_RRRD(s390_irgen_CLCLE, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0xac: /* STNSM */ goto unimplemented;
+ case 0xad: /* STOSM */ goto unimplemented;
+ case 0xae: /* SIGP */ goto unimplemented;
+ case 0xaf: /* MC */ goto unimplemented;
+ case 0xb1: /* LRA */ goto unimplemented;
+ case 0xb6: /* STCTL */ goto unimplemented;
+ case 0xb7: /* LCTL */ goto unimplemented;
+ case 0xba: s390_format_RS_RRRD(s390_irgen_CS, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0xbb: /* CDS */ goto unimplemented;
+ case 0xbd: s390_format_RS_RURD(s390_irgen_CLM, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0xbe: s390_format_RS_RURD(s390_irgen_STCM, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ case 0xbf: s390_format_RS_RURD(s390_irgen_ICM, ovl.fmt.RS.r1, ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2); goto ok;
+ }
+
+ return S390_DECODE_UNKNOWN_INSN;
+
+ok:
+ return S390_DECODE_OK;
+
+unimplemented:
+ return S390_DECODE_UNIMPLEMENTED_INSN;
+}
+
+static s390_decode_t
+s390_decode_6byte_and_irgen(UChar *bytes)
+{
+ typedef union {
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int r3 : 4;
+ unsigned int i2 : 16;
+ unsigned int : 8;
+ unsigned int op2 : 8;
+ } RIE;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ unsigned int i3 : 8;
+ unsigned int i4 : 8;
+ unsigned int i5 : 8;
+ unsigned int op2 : 8;
+ } RIE_RRUUU;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int : 4;
+ unsigned int i2 : 16;
+ unsigned int m3 : 4;
+ unsigned int : 4;
+ unsigned int op2 : 8;
+ } RIEv1;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ unsigned int i4 : 16;
+ unsigned int m3 : 4;
+ unsigned int : 4;
+ unsigned int op2 : 8;
+ } RIE_RRPU;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int m3 : 4;
+ unsigned int i4 : 16;
+ unsigned int i2 : 8;
+ unsigned int op2 : 8;
+ } RIEv3;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int op2 : 4;
+ unsigned int i2 : 32;
+ } RIL;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int m3 : 4;
+ unsigned int b4 : 4;
+ unsigned int d4 : 12;
+ unsigned int i2 : 8;
+ unsigned int op2 : 8;
+ } RIS;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int r2 : 4;
+ unsigned int b4 : 4;
+ unsigned int d4 : 12;
+ unsigned int m3 : 4;
+ unsigned int : 4;
+ unsigned int op2 : 8;
+ } RRS;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int l1 : 4;
+ unsigned int : 4;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ unsigned int : 8;
+ unsigned int op2 : 8;
+ } RSL;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int r3 : 4;
+ unsigned int b2 : 4;
+ unsigned int dl2 : 12;
+ unsigned int dh2 : 8;
+ unsigned int op2 : 8;
+ } RSY;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int x2 : 4;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ unsigned int : 8;
+ unsigned int op2 : 8;
+ } RXE;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r3 : 4;
+ unsigned int x2 : 4;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ unsigned int r1 : 4;
+ unsigned int : 4;
+ unsigned int op2 : 8;
+ } RXF;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r1 : 4;
+ unsigned int x2 : 4;
+ unsigned int b2 : 4;
+ unsigned int dl2 : 12;
+ unsigned int dh2 : 8;
+ unsigned int op2 : 8;
+ } RXY;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int i2 : 8;
+ unsigned int b1 : 4;
+ unsigned int dl1 : 12;
+ unsigned int dh1 : 8;
+ unsigned int op2 : 8;
+ } SIY;
+ struct {
+ unsigned int op : 8;
+ unsigned int l : 8;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ } SS;
+ struct {
+ unsigned int op : 8;
+ unsigned int l1 : 4;
+ unsigned int l2 : 4;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ } SS_LLRDRD;
+ struct {
+ unsigned int op : 8;
+ unsigned int r1 : 4;
+ unsigned int r3 : 4;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ unsigned int b4 : 4;
+ unsigned int d4 : 12;
+ } SS_RRRDRD2;
+ struct {
+ unsigned int op : 16;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ } SSE;
+ struct {
+ unsigned int op1 : 8;
+ unsigned int r3 : 4;
+ unsigned int op2 : 4;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ } SSF;
+ struct {
+ unsigned int op : 16;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ unsigned int i2 : 16;
+ } SIL;
+ } formats;
+ union {
+ formats fmt;
+ ULong value;
+ } ovl;
+
+ vassert(sizeof(formats) == 6);
+
+ ((char *)(&ovl.value))[0] = bytes[0];
+ ((char *)(&ovl.value))[1] = bytes[1];
+ ((char *)(&ovl.value))[2] = bytes[2];
+ ((char *)(&ovl.value))[3] = bytes[3];
+ ((char *)(&ovl.value))[4] = bytes[4];
+ ((char *)(&ovl.value))[5] = bytes[5];
+ ((char *)(&ovl.value))[6] = 0x0;
+ ((char *)(&ovl.value))[7] = 0x0;
+
+ switch ((ovl.value >> 16) & 0xff00000000ffULL) {
+ case 0xe30000000002ULL: s390_format_RXY_RRRD(s390_irgen_LTG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000003ULL: /* LRAG */ goto unimplemented;
+ case 0xe30000000004ULL: s390_format_RXY_RRRD(s390_irgen_LG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000006ULL: s390_format_RXY_RRRD(s390_irgen_CVBY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000008ULL: s390_format_RXY_RRRD(s390_irgen_AG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000009ULL: s390_format_RXY_RRRD(s390_irgen_SG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000000aULL: s390_format_RXY_RRRD(s390_irgen_ALG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000000bULL: s390_format_RXY_RRRD(s390_irgen_SLG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000000cULL: s390_format_RXY_RRRD(s390_irgen_MSG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000000dULL: s390_format_RXY_RRRD(s390_irgen_DSG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000000eULL: /* CVBG */ goto unimplemented;
+ case 0xe3000000000fULL: s390_format_RXY_RRRD(s390_irgen_LRVG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000012ULL: s390_format_RXY_RRRD(s390_irgen_LT, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000013ULL: /* LRAY */ goto unimplemented;
+ case 0xe30000000014ULL: s390_format_RXY_RRRD(s390_irgen_LGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000015ULL: s390_format_RXY_RRRD(s390_irgen_LGH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000016ULL: s390_format_RXY_RRRD(s390_irgen_LLGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000017ULL: s390_format_RXY_RRRD(s390_irgen_LLGT, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000018ULL: s390_format_RXY_RRRD(s390_irgen_AGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000019ULL: s390_format_RXY_RRRD(s390_irgen_SGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000001aULL: s390_format_RXY_RRRD(s390_irgen_ALGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000001bULL: s390_format_RXY_RRRD(s390_irgen_SLGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000001cULL: s390_format_RXY_RRRD(s390_irgen_MSGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000001dULL: s390_format_RXY_RRRD(s390_irgen_DSGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000001eULL: s390_format_RXY_RRRD(s390_irgen_LRV, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000001fULL: s390_format_RXY_RRRD(s390_irgen_LRVH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000020ULL: s390_format_RXY_RRRD(s390_irgen_CG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000021ULL: s390_format_RXY_RRRD(s390_irgen_CLG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000024ULL: s390_format_RXY_RRRD(s390_irgen_STG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000026ULL: s390_format_RXY_RRRD(s390_irgen_CVDY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000002eULL: /* CVDG */ goto unimplemented;
+ case 0xe3000000002fULL: s390_format_RXY_RRRD(s390_irgen_STRVG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000030ULL: s390_format_RXY_RRRD(s390_irgen_CGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000031ULL: s390_format_RXY_RRRD(s390_irgen_CLGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000032ULL: s390_format_RXY_RRRD(s390_irgen_LTGF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000034ULL: s390_format_RXY_RRRD(s390_irgen_CGH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000036ULL: s390_format_RXY_URRD(s390_irgen_PFD, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000003eULL: s390_format_RXY_RRRD(s390_irgen_STRV, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000003fULL: s390_format_RXY_RRRD(s390_irgen_STRVH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000046ULL: s390_format_RXY_RRRD(s390_irgen_BCTG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000050ULL: s390_format_RXY_RRRD(s390_irgen_STY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000051ULL: s390_format_RXY_RRRD(s390_irgen_MSY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000054ULL: s390_format_RXY_RRRD(s390_irgen_NY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000055ULL: s390_format_RXY_RRRD(s390_irgen_CLY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000056ULL: s390_format_RXY_RRRD(s390_irgen_OY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000057ULL: s390_format_RXY_RRRD(s390_irgen_XY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000058ULL: s390_format_RXY_RRRD(s390_irgen_LY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000059ULL: s390_format_RXY_RRRD(s390_irgen_CY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000005aULL: s390_format_RXY_RRRD(s390_irgen_AY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000005bULL: s390_format_RXY_RRRD(s390_irgen_SY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000005cULL: s390_format_RXY_RRRD(s390_irgen_MFY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000005eULL: s390_format_RXY_RRRD(s390_irgen_ALY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000005fULL: s390_format_RXY_RRRD(s390_irgen_SLY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000070ULL: s390_format_RXY_RRRD(s390_irgen_STHY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000071ULL: s390_format_RXY_RRRD(s390_irgen_LAY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000072ULL: s390_format_RXY_RRRD(s390_irgen_STCY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000073ULL: s390_format_RXY_RRRD(s390_irgen_ICY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000075ULL: s390_format_RXY_RRRD(s390_irgen_LAEY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000076ULL: s390_format_RXY_RRRD(s390_irgen_LB, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000077ULL: s390_format_RXY_RRRD(s390_irgen_LGB, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000078ULL: s390_format_RXY_RRRD(s390_irgen_LHY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000079ULL: s390_format_RXY_RRRD(s390_irgen_CHY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000007aULL: s390_format_RXY_RRRD(s390_irgen_AHY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000007bULL: s390_format_RXY_RRRD(s390_irgen_SHY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000007cULL: s390_format_RXY_RRRD(s390_irgen_MHY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000080ULL: s390_format_RXY_RRRD(s390_irgen_NG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000081ULL: s390_format_RXY_RRRD(s390_irgen_OG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000082ULL: s390_format_RXY_RRRD(s390_irgen_XG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000086ULL: s390_format_RXY_RRRD(s390_irgen_MLG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000087ULL: s390_format_RXY_RRRD(s390_irgen_DLG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000088ULL: s390_format_RXY_RRRD(s390_irgen_ALCG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000089ULL: s390_format_RXY_RRRD(s390_irgen_SLBG, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000008eULL: s390_format_RXY_RRRD(s390_irgen_STPQ, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe3000000008fULL: s390_format_RXY_RRRD(s390_irgen_LPQ, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000090ULL: s390_format_RXY_RRRD(s390_irgen_LLGC, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000091ULL: s390_format_RXY_RRRD(s390_irgen_LLGH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000094ULL: s390_format_RXY_RRRD(s390_irgen_LLC, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000095ULL: s390_format_RXY_RRRD(s390_irgen_LLH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000096ULL: s390_format_RXY_RRRD(s390_irgen_ML, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000097ULL: s390_format_RXY_RRRD(s390_irgen_DL, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000098ULL: s390_format_RXY_RRRD(s390_irgen_ALC, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe30000000099ULL: s390_format_RXY_RRRD(s390_irgen_SLB, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000c0ULL: s390_format_RXY_RRRD(s390_irgen_LBH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000c2ULL: s390_format_RXY_RRRD(s390_irgen_LLCH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000c3ULL: s390_format_RXY_RRRD(s390_irgen_STCH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000c4ULL: s390_format_RXY_RRRD(s390_irgen_LHH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000c6ULL: s390_format_RXY_RRRD(s390_irgen_LLHH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000c7ULL: s390_format_RXY_RRRD(s390_irgen_STHH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000caULL: s390_format_RXY_RRRD(s390_irgen_LFH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000cbULL: s390_format_RXY_RRRD(s390_irgen_STFH, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000cdULL: s390_format_RXY_RRRD(s390_irgen_CHF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xe300000000cfULL: s390_format_RXY_RRRD(s390_irgen_CLHF, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xeb0000000004ULL: s390_format_RSY_RRRD(s390_irgen_LMG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000000aULL: s390_format_RSY_RRRD(s390_irgen_SRAG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000000bULL: s390_format_RSY_RRRD(s390_irgen_SLAG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000000cULL: s390_format_RSY_RRRD(s390_irgen_SRLG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000000dULL: s390_format_RSY_RRRD(s390_irgen_SLLG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000000fULL: /* TRACG */ goto unimplemented;
+ case 0xeb0000000014ULL: s390_format_RSY_RRRD(s390_irgen_CSY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000001cULL: s390_format_RSY_RRRD(s390_irgen_RLLG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000001dULL: s390_format_RSY_RRRD(s390_irgen_RLL, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000020ULL: s390_format_RSY_RURD(s390_irgen_CLMH, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000021ULL: s390_format_RSY_RURD(s390_irgen_CLMY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000024ULL: s390_format_RSY_RRRD(s390_irgen_STMG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000025ULL: /* STCTG */ goto unimplemented;
+ case 0xeb0000000026ULL: s390_format_RSY_RRRD(s390_irgen_STMH, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000002cULL: s390_format_RSY_RURD(s390_irgen_STCMH, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000002dULL: s390_format_RSY_RURD(s390_irgen_STCMY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000002fULL: /* LCTLG */ goto unimplemented;
+ case 0xeb0000000030ULL: s390_format_RSY_RRRD(s390_irgen_CSG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000031ULL: /* CDSY */ goto unimplemented;
+ case 0xeb000000003eULL: /* CDSG */ goto unimplemented;
+ case 0xeb0000000044ULL: s390_format_RSY_RRRD(s390_irgen_BXHG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000045ULL: s390_format_RSY_RRRD(s390_irgen_BXLEG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000004cULL: /* ECAG */ goto unimplemented;
+ case 0xeb0000000051ULL: s390_format_SIY_URD(s390_irgen_TMY, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb0000000052ULL: s390_format_SIY_URD(s390_irgen_MVIY, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb0000000054ULL: s390_format_SIY_URD(s390_irgen_NIY, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb0000000055ULL: s390_format_SIY_URD(s390_irgen_CLIY, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb0000000056ULL: s390_format_SIY_URD(s390_irgen_OIY, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb0000000057ULL: s390_format_SIY_URD(s390_irgen_XIY, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb000000006aULL: s390_format_SIY_IRD(s390_irgen_ASI, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb000000006eULL: s390_format_SIY_IRD(s390_irgen_ALSI, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb000000007aULL: s390_format_SIY_IRD(s390_irgen_AGSI, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb000000007eULL: s390_format_SIY_IRD(s390_irgen_ALGSI, ovl.fmt.SIY.i2, ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1, ovl.fmt.SIY.dh1); goto ok;
+ case 0xeb0000000080ULL: s390_format_RSY_RURD(s390_irgen_ICMH, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000081ULL: s390_format_RSY_RURD(s390_irgen_ICMY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000008eULL: /* MVCLU */ goto unimplemented;
+ case 0xeb000000008fULL: /* CLCLU */ goto unimplemented;
+ case 0xeb0000000090ULL: s390_format_RSY_RRRD(s390_irgen_STMY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000096ULL: s390_format_RSY_RRRD(s390_irgen_LMH, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb0000000098ULL: s390_format_RSY_RRRD(s390_irgen_LMY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000009aULL: s390_format_RSY_AARD(s390_irgen_LAMY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb000000009bULL: s390_format_RSY_AARD(s390_irgen_STAMY, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000c0ULL: /* TP */ goto unimplemented;
+ case 0xeb00000000dcULL: s390_format_RSY_RRRD(s390_irgen_SRAK, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000ddULL: s390_format_RSY_RRRD(s390_irgen_SLAK, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000deULL: s390_format_RSY_RRRD(s390_irgen_SRLK, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000dfULL: s390_format_RSY_RRRD(s390_irgen_SLLK, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000e2ULL: /* LOCG */ goto unimplemented;
+ case 0xeb00000000e3ULL: /* STOCG */ goto unimplemented;
+ case 0xeb00000000e4ULL: s390_format_RSY_RRRD(s390_irgen_LANG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000e6ULL: s390_format_RSY_RRRD(s390_irgen_LAOG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000e7ULL: s390_format_RSY_RRRD(s390_irgen_LAXG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000e8ULL: s390_format_RSY_RRRD(s390_irgen_LAAG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000eaULL: s390_format_RSY_RRRD(s390_irgen_LAALG, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000f2ULL: /* LOC */ goto unimplemented;
+ case 0xeb00000000f3ULL: /* STOC */ goto unimplemented;
+ case 0xeb00000000f4ULL: s390_format_RSY_RRRD(s390_irgen_LAN, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000f6ULL: s390_format_RSY_RRRD(s390_irgen_LAO, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000f7ULL: s390_format_RSY_RRRD(s390_irgen_LAX, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000f8ULL: s390_format_RSY_RRRD(s390_irgen_LAA, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xeb00000000faULL: s390_format_RSY_RRRD(s390_irgen_LAAL, ovl.fmt.RSY.r1, ovl.fmt.RSY.r3, ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2, ovl.fmt.RSY.dh2); goto ok;
+ case 0xec0000000044ULL: s390_format_RIE_RRP(s390_irgen_BRXHG, ovl.fmt.RIE.r1, ovl.fmt.RIE.r3, ovl.fmt.RIE.i2); goto ok;
+ case 0xec0000000045ULL: s390_format_RIE_RRP(s390_irgen_BRXLG, ovl.fmt.RIE.r1, ovl.fmt.RIE.r3, ovl.fmt.RIE.i2); goto ok;
+ case 0xec0000000051ULL: /* RISBLG */ goto unimplemented;
+ case 0xec0000000054ULL: s390_format_RIE_RRUUU(s390_irgen_RNSBG, ovl.fmt.RIE_RRUUU.r1, ovl.fmt.RIE_RRUUU.r2, ovl.fmt.RIE_RRUUU.i3, ovl.fmt.RIE_RRUUU.i4, ovl.fmt.RIE_RRUUU.i5); goto ok;
+ case 0xec0000000055ULL: s390_format_RIE_RRUUU(s390_irgen_RISBG, ovl.fmt.RIE_RRUUU.r1, ovl.fmt.RIE_RRUUU.r2, ovl.fmt.RIE_RRUUU.i3, ovl.fmt.RIE_RRUUU.i4, ovl.fmt.RIE_RRUUU.i5); goto ok;
+ case 0xec0000000056ULL: s390_format_RIE_RRUUU(s390_irgen_ROSBG, ovl.fmt.RIE_RRUUU.r1, ovl.fmt.RIE_RRUUU.r2, ovl.fmt.RIE_RRUUU.i3, ovl.fmt.RIE_RRUUU.i4, ovl.fmt.RIE_RRUUU.i5); goto ok;
+ case 0xec0000000057ULL: s390_format_RIE_RRUUU(s390_irgen_RXSBG, ovl.fmt.RIE_RRUUU.r1, ovl.fmt.RIE_RRUUU.r2, ovl.fmt.RIE_RRUUU.i3, ovl.fmt.RIE_RRUUU.i4, ovl.fmt.RIE_RRUUU.i5); goto ok;
+ case 0xec000000005dULL: /* RISBHG */ goto unimplemented;
+ case 0xec0000000064ULL: s390_format_RIE_RRPU(s390_irgen_CGRJ, ovl.fmt.RIE_RRPU.r1, ovl.fmt.RIE_RRPU.r2, ovl.fmt.RIE_RRPU.i4, ovl.fmt.RIE_RRPU.m3); goto ok;
+ case 0xec0000000065ULL: s390_format_RIE_RRPU(s390_irgen_CLGRJ, ovl.fmt.RIE_RRPU.r1, ovl.fmt.RIE_RRPU.r2, ovl.fmt.RIE_RRPU.i4, ovl.fmt.RIE_RRPU.m3); goto ok;
+ case 0xec0000000070ULL: /* CGIT */ goto unimplemented;
+ case 0xec0000000071ULL: /* CLGIT */ goto unimplemented;
+ case 0xec0000000072ULL: /* CIT */ goto unimplemented;
+ case 0xec0000000073ULL: /* CLFIT */ goto unimplemented;
+ case 0xec0000000076ULL: s390_format_RIE_RRPU(s390_irgen_CRJ, ovl.fmt.RIE_RRPU.r1, ovl.fmt.RIE_RRPU.r2, ovl.fmt.RIE_RRPU.i4, ovl.fmt.RIE_RRPU.m3); goto ok;
+ case 0xec0000000077ULL: s390_format_RIE_RRPU(s390_irgen_CLRJ, ovl.fmt.RIE_RRPU.r1, ovl.fmt.RIE_RRPU.r2, ovl.fmt.RIE_RRPU.i4, ovl.fmt.RIE_RRPU.m3); goto ok;
+ case 0xec000000007cULL: s390_format_RIE_RUPI(s390_irgen_CGIJ, ovl.fmt.RIEv3.r1, ovl.fmt.RIEv3.m3, ovl.fmt.RIEv3.i4, ovl.fmt.RIEv3.i2); goto ok;
+ case 0xec000000007dULL: s390_format_RIE_RUPU(s390_irgen_CLGIJ, ovl.fmt.RIEv3.r1, ovl.fmt.RIEv3.m3, ovl.fmt.RIEv3.i4, ovl.fmt.RIEv3.i2); goto ok;
+ case 0xec000000007eULL: s390_format_RIE_RUPI(s390_irgen_CIJ, ovl.fmt.RIEv3.r1, ovl.fmt.RIEv3.m3, ovl.fmt.RIEv3.i4, ovl.fmt.RIEv3.i2); goto ok;
+ case 0xec000000007fULL: s390_format_RIE_RUPU(s390_irgen_CLIJ, ovl.fmt.RIEv3.r1, ovl.fmt.RIEv3.m3, ovl.fmt.RIEv3.i4, ovl.fmt.RIEv3.i2); goto ok;
+ case 0xec00000000d8ULL: s390_format_RIE_RRI0(s390_irgen_AHIK, ovl.fmt.RIE.r1, ovl.fmt.RIE.r3, ovl.fmt.RIE.i2); goto ok;
+ case 0xec00000000d9ULL: s390_format_RIE_RRI0(s390_irgen_AGHIK, ovl.fmt.RIE.r1, ovl.fmt.RIE.r3, ovl.fmt.RIE.i2); goto ok;
+ case 0xec00000000daULL: s390_format_RIE_RRI0(s390_irgen_ALHSIK, ovl.fmt.RIE.r1, ovl.fmt.RIE.r3, ovl.fmt.RIE.i2); goto ok;
+ case 0xec00000000dbULL: s390_format_RIE_RRI0(s390_irgen_ALGHSIK, ovl.fmt.RIE.r1, ovl.fmt.RIE.r3, ovl.fmt.RIE.i2); goto ok;
+ case 0xec00000000e4ULL: s390_format_RRS(s390_irgen_CGRB, ovl.fmt.RRS.r1, ovl.fmt.RRS.r2, ovl.fmt.RRS.b4, ovl.fmt.RRS.d4, ovl.fmt.RRS.m3); goto ok;
+ case 0xec00000000e5ULL: s390_format_RRS(s390_irgen_CLGRB, ovl.fmt.RRS.r1, ovl.fmt.RRS.r2, ovl.fmt.RRS.b4, ovl.fmt.RRS.d4, ovl.fmt.RRS.m3); goto ok;
+ case 0xec00000000f6ULL: s390_format_RRS(s390_irgen_CRB, ovl.fmt.RRS.r1, ovl.fmt.RRS.r2, ovl.fmt.RRS.b4, ovl.fmt.RRS.d4, ovl.fmt.RRS.m3); goto ok;
+ case 0xec00000000f7ULL: s390_format_RRS(s390_irgen_CLRB, ovl.fmt.RRS.r1, ovl.fmt.RRS.r2, ovl.fmt.RRS.b4, ovl.fmt.RRS.d4, ovl.fmt.RRS.m3); goto ok;
+ case 0xec00000000fcULL: s390_format_RIS_RURDI(s390_irgen_CGIB, ovl.fmt.RIS.r1, ovl.fmt.RIS.m3, ovl.fmt.RIS.b4, ovl.fmt.RIS.d4, ovl.fmt.RIS.i2); goto ok;
+ case 0xec00000000fdULL: s390_format_RIS_RURDU(s390_irgen_CLGIB, ovl.fmt.RIS.r1, ovl.fmt.RIS.m3, ovl.fmt.RIS.b4, ovl.fmt.RIS.d4, ovl.fmt.RIS.i2); goto ok;
+ case 0xec00000000feULL: s390_format_RIS_RURDI(s390_irgen_CIB, ovl.fmt.RIS.r1, ovl.fmt.RIS.m3, ovl.fmt.RIS.b4, ovl.fmt.RIS.d4, ovl.fmt.RIS.i2); goto ok;
+ case 0xec00000000ffULL: s390_format_RIS_RURDU(s390_irgen_CLIB, ovl.fmt.RIS.r1, ovl.fmt.RIS.m3, ovl.fmt.RIS.b4, ovl.fmt.RIS.d4, ovl.fmt.RIS.i2); goto ok;
+ case 0xed0000000004ULL: s390_format_RXE_FRRD(s390_irgen_LDEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000005ULL: s390_format_RXE_FRRD(s390_irgen_LXDB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000006ULL: s390_format_RXE_FRRD(s390_irgen_LXEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000007ULL: /* MXDB */ goto unimplemented;
+ case 0xed0000000008ULL: /* KEB */ goto unimplemented;
+ case 0xed0000000009ULL: s390_format_RXE_FRRD(s390_irgen_CEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000000aULL: s390_format_RXE_FRRD(s390_irgen_AEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000000bULL: s390_format_RXE_FRRD(s390_irgen_SEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000000cULL: /* MDEB */ goto unimplemented;
+ case 0xed000000000dULL: s390_format_RXE_FRRD(s390_irgen_DEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000000eULL: s390_format_RXF_FRRDF(s390_irgen_MAEB, ovl.fmt.RXF.r3, ovl.fmt.RXF.x2, ovl.fmt.RXF.b2, ovl.fmt.RXF.d2, ovl.fmt.RXF.r1); goto ok;
+ case 0xed000000000fULL: s390_format_RXF_FRRDF(s390_irgen_MSEB, ovl.fmt.RXF.r3, ovl.fmt.RXF.x2, ovl.fmt.RXF.b2, ovl.fmt.RXF.d2, ovl.fmt.RXF.r1); goto ok;
+ case 0xed0000000010ULL: s390_format_RXE_FRRD(s390_irgen_TCEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000011ULL: s390_format_RXE_FRRD(s390_irgen_TCDB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000012ULL: s390_format_RXE_FRRD(s390_irgen_TCXB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000014ULL: s390_format_RXE_FRRD(s390_irgen_SQEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000015ULL: s390_format_RXE_FRRD(s390_irgen_SQDB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000017ULL: s390_format_RXE_FRRD(s390_irgen_MEEB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed0000000018ULL: /* KDB */ goto unimplemented;
+ case 0xed0000000019ULL: s390_format_RXE_FRRD(s390_irgen_CDB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000001aULL: s390_format_RXE_FRRD(s390_irgen_ADB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000001bULL: s390_format_RXE_FRRD(s390_irgen_SDB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000001cULL: s390_format_RXE_FRRD(s390_irgen_MDB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000001dULL: s390_format_RXE_FRRD(s390_irgen_DDB, ovl.fmt.RXE.r1, ovl.fmt.RXE.x2, ovl.fmt.RXE.b2, ovl.fmt.RXE.d2); goto ok;
+ case 0xed000000001eULL: s390_format_RXF_FRRDF(s390_irgen_MADB, ovl.fmt.RXF.r3, ovl.fmt.RXF.x2, ovl.fmt.RXF.b2, ovl.fmt.RXF.d2, ovl.fmt.RXF.r1); goto ok;
+ case 0xed000000001fULL: s390_format_RXF_FRRDF(s390_irgen_MSDB, ovl.fmt.RXF.r3, ovl.fmt.RXF.x2, ovl.fmt.RXF.b2, ovl.fmt.RXF.d2, ovl.fmt.RXF.r1); goto ok;
+ case 0xed0000000024ULL: /* LDE */ goto unimplemented;
+ case 0xed0000000025ULL: /* LXD */ goto unimplemented;
+ case 0xed0000000026ULL: /* LXE */ goto unimplemented;
+ case 0xed000000002eULL: /* MAE */ goto unimplemented;
+ case 0xed000000002fULL: /* MSE */ goto unimplemented;
+ case 0xed0000000034ULL: /* SQE */ goto unimplemented;
+ case 0xed0000000035ULL: /* SQD */ goto unimplemented;
+ case 0xed0000000037ULL: /* MEE */ goto unimplemented;
+ case 0xed0000000038ULL: /* MAYL */ goto unimplemented;
+ case 0xed0000000039ULL: /* MYL */ goto unimplemented;
+ case 0xed000000003aULL: /* MAY */ goto unimplemented;
+ case 0xed000000003bULL: /* MY */ goto unimplemented;
+ case 0xed000000003cULL: /* MAYH */ goto unimplemented;
+ case 0xed000000003dULL: /* MYH */ goto unimplemented;
+ case 0xed000000003eULL: /* MAD */ goto unimplemented;
+ case 0xed000000003fULL: /* MSD */ goto unimplemented;
+ case 0xed0000000040ULL: /* SLDT */ goto unimplemented;
+ case 0xed0000000041ULL: /* SRDT */ goto unimplemented;
+ case 0xed0000000048ULL: /* SLXT */ goto unimplemented;
+ case 0xed0000000049ULL: /* SRXT */ goto unimplemented;
+ case 0xed0000000050ULL: /* TDCET */ goto unimplemented;
+ case 0xed0000000051ULL: /* TDGET */ goto unimplemented;
+ case 0xed0000000054ULL: /* TDCDT */ goto unimplemented;
+ case 0xed0000000055ULL: /* TDGDT */ goto unimplemented;
+ case 0xed0000000058ULL: /* TDCXT */ goto unimplemented;
+ case 0xed0000000059ULL: /* TDGXT */ goto unimplemented;
+ case 0xed0000000064ULL: s390_format_RXY_FRRD(s390_irgen_LEY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xed0000000065ULL: s390_format_RXY_FRRD(s390_irgen_LDY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xed0000000066ULL: s390_format_RXY_FRRD(s390_irgen_STEY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ case 0xed0000000067ULL: s390_format_RXY_FRRD(s390_irgen_STDY, ovl.fmt.RXY.r1, ovl.fmt.RXY.x2, ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2, ovl.fmt.RXY.dh2); goto ok;
+ }
+
+ switch (((ovl.value >> 16) & 0xff0f00000000ULL) >> 32) {
+ case 0xc000ULL: s390_format_RIL_RP(s390_irgen_LARL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc001ULL: s390_format_RIL_RI(s390_irgen_LGFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc004ULL: s390_format_RIL(s390_irgen_BRCL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc005ULL: s390_format_RIL_RP(s390_irgen_BRASL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc006ULL: s390_format_RIL_RU(s390_irgen_XIHF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc007ULL: s390_format_RIL_RU(s390_irgen_XILF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc008ULL: s390_format_RIL_RU(s390_irgen_IIHF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc009ULL: s390_format_RIL_RU(s390_irgen_IILF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc00aULL: s390_format_RIL_RU(s390_irgen_NIHF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc00bULL: s390_format_RIL_RU(s390_irgen_NILF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc00cULL: s390_format_RIL_RU(s390_irgen_OIHF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc00dULL: s390_format_RIL_RU(s390_irgen_OILF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc00eULL: s390_format_RIL_RU(s390_irgen_LLIHF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc00fULL: s390_format_RIL_RU(s390_irgen_LLILF, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc200ULL: s390_format_RIL_RI(s390_irgen_MSGFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc201ULL: s390_format_RIL_RI(s390_irgen_MSFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc204ULL: s390_format_RIL_RU(s390_irgen_SLGFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc205ULL: s390_format_RIL_RU(s390_irgen_SLFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc208ULL: s390_format_RIL_RI(s390_irgen_AGFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc209ULL: s390_format_RIL_RI(s390_irgen_AFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc20aULL: s390_format_RIL_RU(s390_irgen_ALGFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc20bULL: s390_format_RIL_RU(s390_irgen_ALFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc20cULL: s390_format_RIL_RI(s390_irgen_CGFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc20dULL: s390_format_RIL_RI(s390_irgen_CFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc20eULL: s390_format_RIL_RU(s390_irgen_CLGFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc20fULL: s390_format_RIL_RU(s390_irgen_CLFI, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc402ULL: s390_format_RIL_RP(s390_irgen_LLHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc404ULL: s390_format_RIL_RP(s390_irgen_LGHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc405ULL: s390_format_RIL_RP(s390_irgen_LHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc406ULL: s390_format_RIL_RP(s390_irgen_LLGHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc407ULL: s390_format_RIL_RP(s390_irgen_STHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc408ULL: s390_format_RIL_RP(s390_irgen_LGRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc40bULL: s390_format_RIL_RP(s390_irgen_STGRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc40cULL: s390_format_RIL_RP(s390_irgen_LGFRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc40dULL: s390_format_RIL_RP(s390_irgen_LRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc40eULL: s390_format_RIL_RP(s390_irgen_LLGFRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc40fULL: s390_format_RIL_RP(s390_irgen_STRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc600ULL: s390_format_RIL_RP(s390_irgen_EXRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc602ULL: s390_format_RIL_UP(s390_irgen_PFDRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc604ULL: s390_format_RIL_RP(s390_irgen_CGHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc605ULL: s390_format_RIL_RP(s390_irgen_CHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc606ULL: s390_format_RIL_RP(s390_irgen_CLGHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc607ULL: s390_format_RIL_RP(s390_irgen_CLHRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc608ULL: s390_format_RIL_RP(s390_irgen_CGRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc60aULL: s390_format_RIL_RP(s390_irgen_CLGRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc60cULL: s390_format_RIL_RP(s390_irgen_CGFRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc60dULL: s390_format_RIL_RP(s390_irgen_CRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc60eULL: s390_format_RIL_RP(s390_irgen_CLGFRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc60fULL: s390_format_RIL_RP(s390_irgen_CLRL, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xc800ULL: /* MVCOS */ goto unimplemented;
+ case 0xc801ULL: /* ECTG */ goto unimplemented;
+ case 0xc802ULL: /* CSST */ goto unimplemented;
+ case 0xc804ULL: /* LPD */ goto unimplemented;
+ case 0xc805ULL: /* LPDG */ goto unimplemented;
+ case 0xcc06ULL: /* BRCTH */ goto unimplemented;
+ case 0xcc08ULL: s390_format_RIL_RI(s390_irgen_AIH, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xcc0aULL: s390_format_RIL_RI(s390_irgen_ALSIH, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xcc0bULL: s390_format_RIL_RI(s390_irgen_ALSIHN, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xcc0dULL: s390_format_RIL_RI(s390_irgen_CIH, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ case 0xcc0fULL: s390_format_RIL_RU(s390_irgen_CLIH, ovl.fmt.RIL.r1, ovl.fmt.RIL.i2); goto ok;
+ }
+
+ switch (((ovl.value >> 16) & 0xff0000000000ULL) >> 40) {
+ case 0xd0ULL: /* TRTR */ goto unimplemented;
+ case 0xd1ULL: /* MVN */ goto unimplemented;
+ case 0xd2ULL: s390_format_SS_L0RDRD(s390_irgen_MVC, ovl.fmt.SS.l, ovl.fmt.SS.b1, ovl.fmt.SS.d1, ovl.fmt.SS.b2, ovl.fmt.SS.d2); goto ok;
+ case 0xd3ULL: /* MVZ */ goto unimplemented;
+ case 0xd4ULL: s390_format_SS_L0RDRD(s390_irgen_NC, ovl.fmt.SS.l, ovl.fmt.SS.b1, ovl.fmt.SS.d1, ovl.fmt.SS.b2, ovl.fmt.SS.d2); goto ok;
+ case 0xd5ULL: s390_format_SS_L0RDRD(s390_irgen_CLC, ovl.fmt.SS.l, ovl.fmt.SS.b1, ovl.fmt.SS.d1, ovl.fmt.SS.b2, ovl.fmt.SS.d2); goto ok;
+ case 0xd6ULL: s390_format_SS_L0RDRD(s390_irgen_OC, ovl.fmt.SS.l, ovl.fmt.SS.b1, ovl.fmt.SS.d1, ovl.fmt.SS.b2, ovl.fmt.SS.d2); goto ok;
+ case 0xd7ULL: s390_format_SS_L0RDRD(s390_irgen_XC, ovl.fmt.SS.l, ovl.fmt.SS.b1, ovl.fmt.SS.d1, ovl.fmt.SS.b2, ovl.fmt.SS.d2); goto ok;
+ case 0xd9ULL: /* MVCK */ goto unimplemented;
+ case 0xdaULL: /* MVCP */ goto unimplemented;
+ case 0xdbULL: /* MVCS */ goto unimplemented;
+ case 0xdcULL: /* TR */ goto unimplemented;
+ case 0xddULL: /* TRT */ goto unimplemented;
+ case 0xdeULL: /* ED */ goto unimplemented;
+ case 0xdfULL: /* EDMK */ goto unimplemented;
+ case 0xe1ULL: /* PKU */ goto unimplemented;
+ case 0xe2ULL: /* UNPKU */ goto unimplemented;
+ case 0xe8ULL: /* MVCIN */ goto unimplemented;
+ case 0xe9ULL: /* PKA */ goto unimplemented;
+ case 0xeaULL: /* UNPKA */ goto unimplemented;
+ case 0xeeULL: /* PLO */ goto unimplemented;
+ case 0xefULL: /* LMD */ goto unimplemented;
+ case 0xf0ULL: /* SRP */ goto unimplemented;
+ case 0xf1ULL: /* MVO */ goto unimplemented;
+ case 0xf2ULL: /* PACK */ goto unimplemented;
+ case 0xf3ULL: /* UNPK */ goto unimplemented;
+ case 0xf8ULL: /* ZAP */ goto unimplemented;
+ case 0xf9ULL: /* CP */ goto unimplemented;
+ case 0xfaULL: /* AP */ goto unimplemented;
+ case 0xfbULL: /* SP */ goto unimplemented;
+ case 0xfcULL: /* MP */ goto unimplemented;
+ case 0xfdULL: /* DP */ goto unimplemented;
+ }
+
+ switch (((ovl.value >> 16) & 0xffff00000000ULL) >> 32) {
+ case 0xe500ULL: /* LASP */ goto unimplemented;
+ case 0xe501ULL: /* TPROT */ goto unimplemented;
+ case 0xe502ULL: /* STRAG */ goto unimplemented;
+ case 0xe50eULL: /* MVCSK */ goto unimplemented;
+ case 0xe50fULL: /* MVCDK */ goto unimplemented;
+ case 0xe544ULL: s390_format_SIL_RDI(s390_irgen_MVHHI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe548ULL: s390_format_SIL_RDI(s390_irgen_MVGHI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe54cULL: s390_format_SIL_RDI(s390_irgen_MVHI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe554ULL: s390_format_SIL_RDI(s390_irgen_CHHSI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe555ULL: s390_format_SIL_RDU(s390_irgen_CLHHSI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe558ULL: s390_format_SIL_RDI(s390_irgen_CGHSI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe559ULL: s390_format_SIL_RDU(s390_irgen_CLGHSI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe55cULL: s390_format_SIL_RDI(s390_irgen_CHSI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ case 0xe55dULL: s390_format_SIL_RDU(s390_irgen_CLFHSI, ovl.fmt.SIL.b1, ovl.fmt.SIL.d1, ovl.fmt.SIL.i2); goto ok;
+ }
+
+ return S390_DECODE_UNKNOWN_INSN;
+
+ok:
+ return S390_DECODE_OK;
+
+unimplemented:
+ return S390_DECODE_UNIMPLEMENTED_INSN;
+}
+
+/* Handle "special" instructions. */
+static s390_decode_t
+s390_decode_special_and_irgen(UChar *bytes)
+{
+ s390_decode_t status = S390_DECODE_OK;
+
+ /* Got a "Special" instruction preamble. Which one is it? */
+ if (bytes[0] == 0x18 && bytes[1] == 0x22 /* lr %r2, %r2 */) {
+ s390_irgen_client_request(); /* tool/client request trap */
+ } else if (bytes[0] == 0x18 && bytes[1] == 0x33 /* lr %r3, %r3 */) {
+ s390_irgen_guest_NRADDR(); /* query non-redirected guest address */
+ } else if (bytes[0] == 0x18 && bytes[1] == 0x44 /* lr %r4, %r4 */) {
+ s390_irgen_noredir(); /* branch-and-link without redirection */
+ } else {
+ /* We don't know what it is. */
+ return S390_DECODE_UNKNOWN_SPECIAL_INSN;
+ }
+
+ s390_dis_res->len = S390_SPECIAL_OP_PREAMBLE_SIZE + S390_SPECIAL_OP_SIZE; /* consume preamble + marker insn */
+
+ return status;
+}
+
+
+/* Function returns # bytes that were decoded or 0 in case of failure */
+UInt
+s390_decode_and_irgen(UChar *bytes, UInt insn_length, DisResult *dres)
+{
+ s390_decode_t status;
+
+ s390_dis_res = dres; /* publish DisResult for the decode helpers */
+
+ /* Spot the 8-byte preamble: 18ff lr r15,r15
+ 1811 lr r1,r1
+ 1822 lr r2,r2
+ 1833 lr r3,r3 */
+ if (bytes[ 0] == 0x18 && bytes[ 1] == 0xff && bytes[ 2] == 0x18 &&
+ bytes[ 3] == 0x11 && bytes[ 4] == 0x18 && bytes[ 5] == 0x22 &&
+ bytes[ 6] == 0x18 && bytes[ 7] == 0x33) {
+
+ /* Handle special instruction that follows that preamble. */
+ if (0) vex_printf("special function handling...\n");
+ bytes += S390_SPECIAL_OP_PREAMBLE_SIZE; /* skip the 8-byte preamble */
+ status = s390_decode_special_and_irgen(bytes);
+ insn_length = S390_SPECIAL_OP_SIZE;
+ } else {
+ /* Handle normal instructions. */
+ switch (insn_length) {
+ case 2:
+ status = s390_decode_2byte_and_irgen(bytes);
+ break;
+
+ case 4:
+ status = s390_decode_4byte_and_irgen(bytes);
+ break;
+
+ case 6:
+ status = s390_decode_6byte_and_irgen(bytes);
+ break;
+
+ default:
+ status = S390_DECODE_ERROR; /* s390 insns are 2, 4 or 6 bytes only */
+ break;
+ }
+ }
+ /* next instruction is execute, stop here */
+ if (irsb->next == NULL && (*(char *)(HWord) guest_IA_next_instr == 0x44)) { /* 0x44 == opcode of EX (execute) */
+ irsb->next = IRExpr_Const(IRConst_U64(guest_IA_next_instr));
+ s390_dis_res->whatNext = Dis_StopHere;
+ }
+
+ if (status == S390_DECODE_OK) return insn_length; /* OK */
+
+ /* Decoding failed somehow */
+ vex_printf("vex s390->IR: ");
+ switch (status) {
+ case S390_DECODE_UNKNOWN_INSN:
+ vex_printf("unknown insn: ");
+ break;
+
+ case S390_DECODE_UNIMPLEMENTED_INSN:
+ vex_printf("unimplemented insn: ");
+ break;
+
+ case S390_DECODE_UNKNOWN_SPECIAL_INSN:
+ vex_printf("unimplemented special insn: ");
+ break;
+
+ default:
+ case S390_DECODE_ERROR:
+ vex_printf("decoding error: ");
+ break;
+ }
+
+ vex_printf("%02x%02x", bytes[0], bytes[1]);
+ if (insn_length > 2) {
+ vex_printf(" %02x%02x", bytes[2], bytes[3]);
+ }
+ if (insn_length > 4) {
+ vex_printf(" %02x%02x", bytes[4], bytes[5]);
+ }
+ vex_printf("\n");
+
+ return 0; /* Failed */
+}
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_decoder.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/guest_s390_defs.h
+++ valgrind/VEX/priv/guest_s390_defs.h
@@ -0,0 +1,76 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+/* This file declares the symbols that guest-s390 exports to
+ vex_main.c and nothing else. */
+
+#ifndef __VEX_GUEST_S390_DEFS_H
+#define __VEX_GUEST_S390_DEFS_H
+
+
+/* Convert one s390 insn to IR. See the type DisOneInstrFn in
+ bb_to_IR.h. */
+extern
+DisResult disInstr_S390 ( IRSB* irbb,
+ Bool put_IP,
+ Bool (*resteerOkFn) ( void*, Addr64 ),
+ Bool resteerCisOk,
+ void* callback_opaque,
+ UChar* guest_code,
+ Long delta,
+ Addr64 guest_IP,
+ VexArch guest_arch,
+ VexArchInfo* archinfo,
+ VexAbiInfo* abiinfo,
+ Bool host_bigendian );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern
+IRExpr* guest_s390x_spechelper ( HChar *function_name,
+ IRExpr **args,
+ IRStmt **precedingStmts,
+ Int n_precedingStmts);
+
+
+/* Describes to the optimiser which parts of the guest state require
+   precise memory exceptions. This is logically part of the guest
+   state description. */
+extern
+Bool guest_s390x_state_requires_precise_mem_exns ( Int, Int );
+
+extern
+VexGuestLayout s390xGuest_layout;
+
+#endif /* __VEX_GUEST_S390_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/guest_s390_helpers.c
+++ valgrind/VEX/priv/guest_s390_helpers.c
@@ -0,0 +1,240 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_emwarn.h"
+#include "libvex_guest_s390x.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_s390_defs.h"
+#include "guest_s390_priv.h"
+
+
+void
+LibVEX_GuestS390X_initialise(VexGuestS390XState *state)
+{
+/*------------------------------------------------------------*/
+/*--- Initialise ar registers ---*/
+/*------------------------------------------------------------*/
+
+ state->guest_a0 = 0;
+ state->guest_a1 = 0;
+ state->guest_a2 = 0;
+ state->guest_a3 = 0;
+ state->guest_a4 = 0;
+ state->guest_a5 = 0;
+ state->guest_a6 = 0;
+ state->guest_a7 = 0;
+ state->guest_a8 = 0;
+ state->guest_a9 = 0;
+ state->guest_a10 = 0;
+ state->guest_a11 = 0;
+ state->guest_a12 = 0;
+ state->guest_a13 = 0;
+ state->guest_a14 = 0;
+ state->guest_a15 = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise fpr registers ---*/
+/*------------------------------------------------------------*/
+
+ state->guest_f0 = 0;
+ state->guest_f1 = 0;
+ state->guest_f2 = 0;
+ state->guest_f3 = 0;
+ state->guest_f4 = 0;
+ state->guest_f5 = 0;
+ state->guest_f6 = 0;
+ state->guest_f7 = 0;
+ state->guest_f8 = 0;
+ state->guest_f9 = 0;
+ state->guest_f10 = 0;
+ state->guest_f11 = 0;
+ state->guest_f12 = 0;
+ state->guest_f13 = 0;
+ state->guest_f14 = 0;
+ state->guest_f15 = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise gpr registers ---*/
+/*------------------------------------------------------------*/
+
+ state->guest_r0 = 0;
+ state->guest_r1 = 0;
+ state->guest_r2 = 0;
+ state->guest_r3 = 0;
+ state->guest_r4 = 0;
+ state->guest_r5 = 0;
+ state->guest_r6 = 0;
+ state->guest_r7 = 0;
+ state->guest_r8 = 0;
+ state->guest_r9 = 0;
+ state->guest_r10 = 0;
+ state->guest_r11 = 0;
+ state->guest_r12 = 0;
+ state->guest_r13 = 0;
+ state->guest_r14 = 0;
+ state->guest_r15 = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise S390 miscellaneous registers ---*/
+/*------------------------------------------------------------*/
+
+ state->guest_counter = 0;
+ state->guest_fpc = 0;
+ state->guest_IA = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise S390 pseudo registers ---*/
+/*------------------------------------------------------------*/
+
+ state->guest_SYSNO = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise generic pseudo registers ---*/
+/*------------------------------------------------------------*/
+
+ state->guest_NRADDR = 0;
+ state->guest_TISTART = 0;
+ state->guest_TILEN = 0;
+ state->guest_IP_AT_SYSCALL = 0;
+ state->guest_EMWARN = EmWarn_NONE;
+
+/*------------------------------------------------------------*/
+/*--- Initialise thunk ---*/
+/*------------------------------------------------------------*/
+
+ state->guest_CC_OP = 0;
+ state->guest_CC_DEP1 = 0;
+ state->guest_CC_DEP2 = 0;
+ state->guest_CC_NDEP = 0;
+}
+
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions. If in doubt return
+   True (but this generates significantly slower code). */
+Bool
+guest_s390x_state_requires_precise_mem_exns(Int minoff, Int maxoff)
+{
+ /* fixs390: not sure whether all of these are needed */
+ Int lr_min = offsetof(VexGuestS390XState, guest_LR);
+ Int lr_max = lr_min + 8 - 1;
+ Int sp_min = offsetof(VexGuestS390XState, guest_SP);
+ Int sp_max = sp_min + 8 - 1;
+ Int fp_min = offsetof(VexGuestS390XState, guest_FP);
+ Int fp_max = fp_min + 8 - 1;
+ Int ia_min = offsetof(VexGuestS390XState, guest_IA);
+ Int ia_max = ia_min + 8 - 1;
+
+ if (maxoff < lr_min || minoff > lr_max) {
+ /* No overlap with LR */
+ } else {
+ return True;
+ }
+
+ if (maxoff < sp_min || minoff > sp_max) {
+ /* No overlap with SP */
+ } else {
+ return True;
+ }
+
+ if (maxoff < fp_min || minoff > fp_max) {
+ /* No overlap with FP */
+ } else {
+ return True;
+ }
+
+ if (maxoff < ia_min || minoff > ia_max) {
+ /* No overlap with IA */
+ } else {
+ return True;
+ }
+
+ return False;
+}
+
+
+#define ALWAYSDEFD(field) \
+ { offsetof(VexGuestS390XState, field), \
+ (sizeof ((VexGuestS390XState*)0)->field) }
+
+VexGuestLayout s390xGuest_layout = {
+
+ /* Total size of the guest state, in bytes. */
+ .total_sizeB = sizeof(VexGuestS390XState),
+
+ /* Describe the stack pointer. */
+ .offset_SP = offsetof(VexGuestS390XState, guest_SP),
+ .sizeof_SP = 8,
+
+ /* Describe the frame pointer. */
+ .offset_FP = offsetof(VexGuestS390XState, guest_FP),
+ .sizeof_FP = 8,
+
+ /* Describe the instruction pointer. */
+ .offset_IP = offsetof(VexGuestS390XState, guest_IA),
+ .sizeof_IP = 8,
+
+ /* Describe any sections to be regarded by Memcheck as
+ 'always-defined'. */
+ .n_alwaysDefd = 9,
+
+ /* Flags thunk: OP and NDEP are always defined, whereas DEP1
+ and DEP2 have to be tracked. See detailed comment in
+ gdefs.h on meaning of thunk fields. */
+ .alwaysDefd = {
+ /* 0 */ ALWAYSDEFD(guest_CC_OP), /* generic */
+ /* 1 */ ALWAYSDEFD(guest_CC_NDEP), /* generic */
+ /* 2 */ ALWAYSDEFD(guest_EMWARN), /* generic */
+ /* 3 */ ALWAYSDEFD(guest_TISTART), /* generic */
+ /* 4 */ ALWAYSDEFD(guest_TILEN), /* generic */
+ /* 5 */ ALWAYSDEFD(guest_IP_AT_SYSCALL), /* generic */
+ /* 6 */ ALWAYSDEFD(guest_IA), /* control reg */
+ /* 7 */ ALWAYSDEFD(guest_fpc), /* control reg */
+ /* 8 */ ALWAYSDEFD(guest_counter), /* internal usage register */
+ }
+};
+
+/*------------------------------------------------------------*/
+/*--- Dirty helper for EXecute ---*/
+/*------------------------------------------------------------*/
+void
+s390x_dirtyhelper_EX(ULong torun)
+{
+ last_execute_target = torun;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_helpers.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/guest_s390_irgen.c
+++ valgrind/VEX/priv/guest_s390_irgen.c
@@ -0,0 +1,9772 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_irgen.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "main_util.h" // vassert
+#include "main_globals.h" // vex_traceflags
+#include "libvex_guest_s390x.h" // VexGuestS390XState
+#include "libvex_guest_offsets.h" // OFFSET_s390x_SYSNO
+#include "host_s390_disasm.h"
+#include "host_s390_insn.h" // S390_ROUND_xyzzy
+#include "guest_s390_priv.h"
+#include "guest_s390_cc.h"
+
+#undef likely
+#undef unlikely
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+/*------------------------------------------------------------*/
+/*--- Helpers for constructing IR. ---*/
+/*------------------------------------------------------------*/
+
+/* Sign extend a value with the given number of bits. This is a
+ macro because it allows us to overload the type of the value.
+ Note that VALUE must have a signed type! */
+#undef sign_extend
+#define sign_extend(value,num_bits) \
+(((value) << (sizeof(__typeof__(value)) * 8 - (num_bits))) >> (sizeof(__typeof__(value)) * 8 - (num_bits)))
+
+
+/* Add a statement to the current irsb. */
+static __inline__ void
+stmt(IRStmt *st)
+{
+ addStmtToIRSB(irsb, st);
+}
+
+/* Allocate a new temporary of the given type. */
+static __inline__ IRTemp
+newTemp(IRType type)
+{
+ vassert(isPlausibleIRType(type));
+
+ return newIRTemp(irsb->tyenv, type);
+}
+
+/* Create an expression node for a temporary */
+static __inline__ IRExpr *
+mkexpr(IRTemp tmp)
+{
+ return IRExpr_RdTmp(tmp);
+}
+
+/* Add a statement that assigns to a temporary */
+static __inline__ void
+assign(IRTemp dst, IRExpr *expr)
+{
+ stmt(IRStmt_WrTmp(dst, expr));
+}
+
+/* Create a temporary of the given type and assign the expression to it */
+static __inline__ IRTemp
+mktemp(IRType type, IRExpr *expr)
+{
+ IRTemp temp = newTemp(type);
+
+ assign(temp, expr);
+
+ return temp;
+}
+
+/* Create a unary expression */
+static __inline__ IRExpr *
+unop(IROp kind, IRExpr *op)
+{
+ return IRExpr_Unop(kind, op);
+}
+
+/* Create a binary expression */
+static __inline__ IRExpr *
+binop(IROp kind, IRExpr *op1, IRExpr *op2)
+{
+ return IRExpr_Binop(kind, op1, op2);
+}
+
+/* Create a ternary expression */
+static __inline__ IRExpr *
+triop(IROp kind, IRExpr *op1, IRExpr *op2, IRExpr *op3)
+{
+ return IRExpr_Triop(kind, op1, op2, op3);
+}
+
+/* Create a quaternary expression */
+static __inline__ IRExpr *
+qop(IROp kind, IRExpr *op1, IRExpr *op2, IRExpr *op3, IRExpr *op4)
+{
+ return IRExpr_Qop(kind, op1, op2, op3, op4);
+}
+
+/* Create an expression node for an 8-bit integer constant */
+static __inline__ IRExpr *
+mkU8(UInt value)
+{
+ vassert(value < 256);
+
+ return IRExpr_Const(IRConst_U8((UChar)value));
+}
+
+/* Create an expression node for a 16-bit integer constant */
+static __inline__ IRExpr *
+mkU16(UInt value)
+{
+ vassert(value < 65536);
+
+ return IRExpr_Const(IRConst_U16((UShort)value));
+}
+
+/* Create an expression node for a 32-bit integer constant */
+static __inline__ IRExpr *
+mkU32(UInt value)
+{
+ return IRExpr_Const(IRConst_U32(value));
+}
+
+/* Create an expression node for a 64-bit integer constant */
+static __inline__ IRExpr *
+mkU64(ULong value)
+{
+ return IRExpr_Const(IRConst_U64(value));
+}
+
+/* Create an expression node for a 32-bit floating point constant
+ whose value is given by a bit pattern. */
+static __inline__ IRExpr *
+mkF32i(UInt value)
+{
+ return IRExpr_Const(IRConst_F32i(value));
+}
+
+/* Create an expression node for a 64-bit floating point constant
+   whose value is given by a bit pattern. */
+static __inline__ IRExpr *
+mkF64i(ULong value)
+{
+ return IRExpr_Const(IRConst_F64i(value));
+}
+
+/* Little helper function for my sanity. ITE = if-then-else */
+static IRExpr *
+mkite(IRExpr *condition, IRExpr *iftrue, IRExpr *iffalse)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+ return IRExpr_Mux0X(unop(Iop_1Uto8, condition), iffalse, iftrue);
+}
+
+/* Add a statement that stores DATA at ADDR. This is a big-endian machine. */
+static void __inline__
+store(IRExpr *addr, IRExpr *data)
+{
+ stmt(IRStmt_Store(Iend_BE, addr, data));
+}
+
+/* Create an expression that loads a TYPE sized value from ADDR.
+ This is a big-endian machine. */
+static __inline__ IRExpr *
+load(IRType type, IRExpr *addr)
+{
+ return IRExpr_Load(Iend_BE, type, addr);
+}
+
+/* Function call */
+static void
+call_function(IRExpr *callee_address)
+{
+ irsb->next = callee_address;
+ irsb->jumpkind = Ijk_Call;
+
+ s390_dis_res->whatNext = Dis_StopHere;
+}
+
+/* Function return sequence */
+static void
+return_from_function(IRExpr *return_address)
+{
+ irsb->next = return_address;
+ irsb->jumpkind = Ijk_Ret;
+
+ s390_dis_res->whatNext = Dis_StopHere;
+}
+
+/* A conditional branch whose target is not known at instrumentation time.
+
+ if (condition) goto computed_target;
+
+ Needs to be represented as:
+
+ if (! condition) goto next_instruction;
+ goto computed_target;
+
+ This inversion is being handled at code generation time. So we just
+ take the condition here as is.
+*/
+static void
+if_not_condition_goto_computed(IRExpr *condition, IRExpr *target)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+ stmt(IRStmt_Exit(condition, Ijk_Boring, IRConst_U64(guest_IA_next_instr)));
+
+ irsb->next = target;
+ irsb->jumpkind = Ijk_Boring;
+
+ s390_dis_res->whatNext = Dis_StopHere;
+}
+
+/* A conditional branch whose target is known at instrumentation time. */
+static void
+if_condition_goto(IRExpr *condition, Addr64 target)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+ stmt(IRStmt_Exit(condition, Ijk_Boring, IRConst_U64(target)));
+ s390_dis_res->whatNext = Dis_Continue;
+}
+
+/* An unconditional branch. Target may or may not be known at instrumentation
+ time. */
+static void
+always_goto(IRExpr *target)
+{
+ irsb->next = target;
+ irsb->jumpkind = Ijk_Boring;
+
+ s390_dis_res->whatNext = Dis_StopHere;
+}
+
+/* A system call */
+static void
+system_call(IRExpr *sysno)
+{
+ /* Store the system call number in the pseudo register. */
+ stmt(IRStmt_Put(OFFSET_s390x_SYSNO, sysno));
+
+ /* Store the current IA into guest_IP_AT_SYSCALL. libvex_ir.h says so.
+ fixs390: As we do not use it, can we get rid of it ?? */
+ stmt(IRStmt_Put(OFFSET_s390x_IP_AT_SYSCALL, mkU64(guest_IA_curr_instr)));
+
+ /* It's important that all ArchRegs carry their up-to-date value
+ at this point. So we declare an end-of-block here, which
+ forces any TempRegs caching ArchRegs to be flushed. */
+ irsb->next = mkU64(guest_IA_next_instr);
+
+ irsb->jumpkind = Ijk_Sys_syscall;
+
+ s390_dis_res->whatNext = Dis_StopHere;
+}
+
+/* Encode the s390 rounding mode as it appears in the m3/m4 fields of certain
+ instructions to VEX's IRRoundingMode. */
+static IRRoundingMode
+encode_rounding_mode(UChar mode)
+{
+ switch (mode) {
+ case S390_ROUND_CURRENT: return Irrm_CURRENT;
+ case S390_ROUND_NEAREST_AWAY: return Irrm_NEAREST_AWAY;
+ case S390_ROUND_NEAREST_EVEN: return Irrm_NEAREST;
+ case S390_ROUND_ZERO: return Irrm_ZERO;
+ case S390_ROUND_POSINF: return Irrm_PosINF;
+ case S390_ROUND_NEGINF: return Irrm_NegINF;
+ }
+ vpanic("encode_rounding_mode");
+}
+
+static __inline__ IRExpr *get_fpr_dw0(UInt);
+static __inline__ void put_fpr_dw0(UInt, IRExpr *);
+
+/* Read a floating point register pair and combine their contents into a
+ 128-bit value */
+static IRExpr *
+get_fpr_pair(UInt archreg)
+{
+ IRExpr *high = get_fpr_dw0(archreg);
+ IRExpr *low = get_fpr_dw0(archreg + 2);
+
+ return binop(Iop_F64HLto128, high, low);
+}
+
+/* Write a 128-bit floating point value into a register pair. */
+static void
+put_fpr_pair(UInt archreg, IRExpr *expr)
+{
+ IRExpr *high = unop(Iop_F128HIto64, expr);
+ IRExpr *low = unop(Iop_F128to64, expr);
+
+ put_fpr_dw0(archreg, high);
+ put_fpr_dw0(archreg + 2, low);
+}
+
+/*------------------------------------------------------------*/
+/*--- Guest register access ---*/
+/*------------------------------------------------------------*/
+
+
+/*------------------------------------------------------------*/
+/*--- ar registers ---*/
+/*------------------------------------------------------------*/
+
+/* Return the guest state offset of an ar register. */
+static UInt
+ar_offset(UInt archreg)
+{
+ static const UInt offset[16] = {
+ offsetof(VexGuestS390XState, guest_a0),
+ offsetof(VexGuestS390XState, guest_a1),
+ offsetof(VexGuestS390XState, guest_a2),
+ offsetof(VexGuestS390XState, guest_a3),
+ offsetof(VexGuestS390XState, guest_a4),
+ offsetof(VexGuestS390XState, guest_a5),
+ offsetof(VexGuestS390XState, guest_a6),
+ offsetof(VexGuestS390XState, guest_a7),
+ offsetof(VexGuestS390XState, guest_a8),
+ offsetof(VexGuestS390XState, guest_a9),
+ offsetof(VexGuestS390XState, guest_a10),
+ offsetof(VexGuestS390XState, guest_a11),
+ offsetof(VexGuestS390XState, guest_a12),
+ offsetof(VexGuestS390XState, guest_a13),
+ offsetof(VexGuestS390XState, guest_a14),
+ offsetof(VexGuestS390XState, guest_a15),
+ };
+
+ vassert(archreg < 16);
+
+ return offset[archreg];
+}
+
+
+/* Return the guest state offset of word #0 of an ar register. */
+static __inline__ UInt
+ar_w0_offset(UInt archreg)
+{
+ return ar_offset(archreg) + 0;
+}
+
+/* Write word #0 of an ar to the guest state. */
+static __inline__ void
+put_ar_w0(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+ stmt(IRStmt_Put(ar_w0_offset(archreg), expr));
+}
+
+/* Read word #0 of an ar register. */
+static __inline__ IRExpr *
+get_ar_w0(UInt archreg)
+{
+ return IRExpr_Get(ar_w0_offset(archreg), Ity_I32);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- fpr registers ---*/
+/*------------------------------------------------------------*/
+
+/* Return the guest state offset of a fpr register. */
+static UInt
+fpr_offset(UInt archreg)
+{
+ static const UInt offset[16] = {
+ offsetof(VexGuestS390XState, guest_f0),
+ offsetof(VexGuestS390XState, guest_f1),
+ offsetof(VexGuestS390XState, guest_f2),
+ offsetof(VexGuestS390XState, guest_f3),
+ offsetof(VexGuestS390XState, guest_f4),
+ offsetof(VexGuestS390XState, guest_f5),
+ offsetof(VexGuestS390XState, guest_f6),
+ offsetof(VexGuestS390XState, guest_f7),
+ offsetof(VexGuestS390XState, guest_f8),
+ offsetof(VexGuestS390XState, guest_f9),
+ offsetof(VexGuestS390XState, guest_f10),
+ offsetof(VexGuestS390XState, guest_f11),
+ offsetof(VexGuestS390XState, guest_f12),
+ offsetof(VexGuestS390XState, guest_f13),
+ offsetof(VexGuestS390XState, guest_f14),
+ offsetof(VexGuestS390XState, guest_f15),
+ };
+
+ vassert(archreg < 16);
+
+ return offset[archreg];
+}
+
+
+/* Return the guest state offset of word #0 of a fpr register. */
+static __inline__ UInt
+fpr_w0_offset(UInt archreg)
+{
+ return fpr_offset(archreg) + 0;
+}
+
+/* Write word #0 of a fpr to the guest state. */
+static __inline__ void
+put_fpr_w0(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_F32);
+
+ stmt(IRStmt_Put(fpr_w0_offset(archreg), expr));
+}
+
+/* Read word #0 of a fpr register. */
+static __inline__ IRExpr *
+get_fpr_w0(UInt archreg)
+{
+ return IRExpr_Get(fpr_w0_offset(archreg), Ity_F32);
+}
+
+/* Return the guest state offset of double word #0 of a fpr register. */
+static __inline__ UInt
+fpr_dw0_offset(UInt archreg)
+{
+ return fpr_offset(archreg) + 0;
+}
+
+/* Write double word #0 of a fpr to the guest state. */
+static __inline__ void
+put_fpr_dw0(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_F64);
+
+ stmt(IRStmt_Put(fpr_dw0_offset(archreg), expr));
+}
+
+/* Read double word #0 of a fpr register. */
+static __inline__ IRExpr *
+get_fpr_dw0(UInt archreg)
+{
+ return IRExpr_Get(fpr_dw0_offset(archreg), Ity_F64);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- gpr registers ---*/
+/*------------------------------------------------------------*/
+
+/* Return the guest state offset of a gpr register. */
+static UInt
+gpr_offset(UInt archreg)
+{
+ static const UInt offset[16] = {
+ offsetof(VexGuestS390XState, guest_r0),
+ offsetof(VexGuestS390XState, guest_r1),
+ offsetof(VexGuestS390XState, guest_r2),
+ offsetof(VexGuestS390XState, guest_r3),
+ offsetof(VexGuestS390XState, guest_r4),
+ offsetof(VexGuestS390XState, guest_r5),
+ offsetof(VexGuestS390XState, guest_r6),
+ offsetof(VexGuestS390XState, guest_r7),
+ offsetof(VexGuestS390XState, guest_r8),
+ offsetof(VexGuestS390XState, guest_r9),
+ offsetof(VexGuestS390XState, guest_r10),
+ offsetof(VexGuestS390XState, guest_r11),
+ offsetof(VexGuestS390XState, guest_r12),
+ offsetof(VexGuestS390XState, guest_r13),
+ offsetof(VexGuestS390XState, guest_r14),
+ offsetof(VexGuestS390XState, guest_r15),
+ };
+
+ vassert(archreg < 16);
+
+ return offset[archreg];
+}
+
+
+/* Return the guest state offset of word #0 of a gpr register. */
+static __inline__ UInt
+gpr_w0_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 0;
+}
+
+/* Write word #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_w0(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+ stmt(IRStmt_Put(gpr_w0_offset(archreg), expr));
+}
+
+/* Read word #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_w0(UInt archreg)
+{
+ return IRExpr_Get(gpr_w0_offset(archreg), Ity_I32);
+}
+
+/* Return the guest state offset of double word #0 of a gpr register. */
+static __inline__ UInt
+gpr_dw0_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 0;
+}
+
+/* Write double word #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_dw0(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I64);
+
+ stmt(IRStmt_Put(gpr_dw0_offset(archreg), expr));
+}
+
+/* Read double word #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_dw0(UInt archreg)
+{
+ return IRExpr_Get(gpr_dw0_offset(archreg), Ity_I64);
+}
+
+/* Return the guest state offset of half word #1 of a gpr register. */
+static __inline__ UInt
+gpr_hw1_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 2;
+}
+
+/* Write half word #1 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw1(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+ stmt(IRStmt_Put(gpr_hw1_offset(archreg), expr));
+}
+
+/* Read half word #1 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw1(UInt archreg)
+{
+ return IRExpr_Get(gpr_hw1_offset(archreg), Ity_I16);
+}
+
+/* Return the guest state offset of byte #6 of a gpr register. */
+static __inline__ UInt
+gpr_b6_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 6;
+}
+
+/* Write byte #6 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b6(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b6_offset(archreg), expr));
+}
+
+/* Read byte #6 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b6(UInt archreg)
+{
+ return IRExpr_Get(gpr_b6_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of byte #3 of a gpr register. */
+static __inline__ UInt
+gpr_b3_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 3;
+}
+
+/* Write byte #3 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b3(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b3_offset(archreg), expr));
+}
+
+/* Read byte #3 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b3(UInt archreg)
+{
+ return IRExpr_Get(gpr_b3_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of byte #0 of a gpr register. */
+static __inline__ UInt
+gpr_b0_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 0;
+}
+
+/* Write byte #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b0(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b0_offset(archreg), expr));
+}
+
+/* Read byte #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b0(UInt archreg)
+{
+ return IRExpr_Get(gpr_b0_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of word #1 of a gpr register. */
+static __inline__ UInt
+gpr_w1_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 4;
+}
+
+/* Write word #1 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_w1(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+ stmt(IRStmt_Put(gpr_w1_offset(archreg), expr));
+}
+
+/* Read word #1 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_w1(UInt archreg)
+{
+ return IRExpr_Get(gpr_w1_offset(archreg), Ity_I32);
+}
+
+/* Return the guest state offset of half word #3 of a gpr register. */
+static __inline__ UInt
+gpr_hw3_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 6;
+}
+
+/* Write half word #3 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw3(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+ stmt(IRStmt_Put(gpr_hw3_offset(archreg), expr));
+}
+
+/* Read half word #3 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw3(UInt archreg)
+{
+ return IRExpr_Get(gpr_hw3_offset(archreg), Ity_I16);
+}
+
+/* Return the guest state offset of byte #7 of a gpr register. */
+static __inline__ UInt
+gpr_b7_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 7;
+}
+
+/* Write byte #7 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b7(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b7_offset(archreg), expr));
+}
+
+/* Read byte #7 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b7(UInt archreg)
+{
+ return IRExpr_Get(gpr_b7_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of half word #0 of a gpr register. */
+static __inline__ UInt
+gpr_hw0_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 0;
+}
+
+/* Write half word #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw0(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+ stmt(IRStmt_Put(gpr_hw0_offset(archreg), expr));
+}
+
+/* Read half word #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw0(UInt archreg)
+{
+ return IRExpr_Get(gpr_hw0_offset(archreg), Ity_I16);
+}
+
+/* Return the guest state offset of byte #4 of a gpr register. */
+static __inline__ UInt
+gpr_b4_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 4;
+}
+
+/* Write byte #4 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b4(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b4_offset(archreg), expr));
+}
+
+/* Read byte #4 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b4(UInt archreg)
+{
+ return IRExpr_Get(gpr_b4_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of byte #1 of a gpr register. */
+static __inline__ UInt
+gpr_b1_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 1;
+}
+
+/* Write byte #1 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b1(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b1_offset(archreg), expr));
+}
+
+/* Read byte #1 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b1(UInt archreg)
+{
+ return IRExpr_Get(gpr_b1_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of half word #2 of a gpr register. */
+static __inline__ UInt
+gpr_hw2_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 4;
+}
+
+/* Write half word #2 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw2(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+ stmt(IRStmt_Put(gpr_hw2_offset(archreg), expr));
+}
+
+/* Read half word #2 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw2(UInt archreg)
+{
+ return IRExpr_Get(gpr_hw2_offset(archreg), Ity_I16);
+}
+
+/* Return the guest state offset of byte #5 of a gpr register. */
+static __inline__ UInt
+gpr_b5_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 5;
+}
+
+/* Write byte #5 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b5(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b5_offset(archreg), expr));
+}
+
+/* Read byte #5 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b5(UInt archreg)
+{
+ return IRExpr_Get(gpr_b5_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of byte #2 of a gpr register. */
+static __inline__ UInt
+gpr_b2_offset(UInt archreg)
+{
+ return gpr_offset(archreg) + 2;
+}
+
+/* Write byte #2 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b2(UInt archreg, IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+ stmt(IRStmt_Put(gpr_b2_offset(archreg), expr));
+}
+
+/* Read byte #2 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b2(UInt archreg)
+{
+ return IRExpr_Get(gpr_b2_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of the counter register. */
+static UInt
+counter_offset(void)
+{
+ return offsetof(VexGuestS390XState, guest_counter);
+}
+
+/* Return the guest state offset of double word #0 of the counter register. */
+static __inline__ UInt
+counter_dw0_offset(void)
+{
+ return counter_offset() + 0;
+}
+
+/* Write double word #0 of the counter to the guest state. */
+static __inline__ void
+put_counter_dw0(IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I64);
+
+ stmt(IRStmt_Put(counter_dw0_offset(), expr));
+}
+
+/* Read double word #0 of the counter register. */
+static __inline__ IRExpr *
+get_counter_dw0(void)
+{
+ return IRExpr_Get(counter_dw0_offset(), Ity_I64);
+}
+
+/* Return the guest state offset of the fpc register. */
+static UInt
+fpc_offset(void)
+{
+ return offsetof(VexGuestS390XState, guest_fpc);
+}
+
+/* Return the guest state offset of word #0 of the fpc register. */
+static __inline__ UInt
+fpc_w0_offset(void)
+{
+ return fpc_offset() + 0;
+}
+
+/* Write word #0 of the fpc to the guest state. */
+static __inline__ void
+put_fpc_w0(IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+ stmt(IRStmt_Put(fpc_w0_offset(), expr));
+}
+
+/* Read word #0 of the fpc register. */
+static __inline__ IRExpr *
+get_fpc_w0(void)
+{
+ return IRExpr_Get(fpc_w0_offset(), Ity_I32);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Build IR for formats ---*/
+/*------------------------------------------------------------*/
+void
+s390_format_I(HChar *(*irgen)(UChar i),
+ UChar i)
+{
+ HChar *mnm = irgen(i);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(MNM, UINT), mnm, i);
+}
+
+void
+s390_format_RI(HChar *(*irgen)(UChar r1, UShort i2),
+ UChar r1, UShort i2)
+{
+ irgen(r1, i2);
+}
+
+void
+s390_format_RI_RU(HChar *(*irgen)(UChar r1, UShort i2),
+ UChar r1, UShort i2)
+{
+ HChar *mnm = irgen(r1, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, UINT), mnm, r1, i2);
+}
+
+void
+s390_format_RI_RI(HChar *(*irgen)(UChar r1, UShort i2),
+ UChar r1, UShort i2)
+{
+ HChar *mnm = irgen(r1, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, INT), mnm, r1, (Int)(Short)i2);
+}
+
+void
+s390_format_RI_RP(HChar *(*irgen)(UChar r1, UShort i2),
+ UChar r1, UShort i2)
+{
+ HChar *mnm = irgen(r1, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, PCREL), mnm, r1, (Int)(Short)i2);
+}
+
+void   /* RIE format: two gprs plus a pc-relative 16-bit offset */
+s390_format_RIE_RRP(HChar *(*irgen)(UChar r1, UChar r3, UShort i2),
+ UChar r1, UChar r3, UShort i2)
+{
+ HChar *mnm = irgen(r1, r3, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, GPR, PCREL), mnm, r1, r3, (Int)(Short)i2);
+}
+
+void   /* RIE format: two gprs plus a signed 16-bit immediate */
+s390_format_RIE_RRI0(HChar *(*irgen)(UChar r1, UChar r3, UShort i2),
+ UChar r1, UChar r3, UShort i2)
+{
+ HChar *mnm = irgen(r1, r3, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, GPR, INT), mnm, r1, r3, (Int)(Short)i2);
+}
+
+void   /* RIE format: two gprs plus three unsigned immediates (e.g. rotate insns) */
+s390_format_RIE_RRUUU(HChar *(*irgen)(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5),
+ UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+ HChar *mnm = irgen(r1, r2, i3, i4, i5);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC6(MNM, GPR, GPR, UINT, UINT, UINT), mnm, r1, r2, i3, i4, i5);
+}
+
+void   /* RIE compare-and-branch: m3 is the condition mask, i4 the branch offset */
+s390_format_RIE_RRPU(HChar *(*irgen)(UChar r1, UChar r2, UShort i4, UChar m3),
+ UChar r1, UChar r2, UShort i4, UChar m3)
+{
+ HChar *mnm = irgen(r1, r2, i4, m3);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC5(XMNM, GPR, GPR, CABM, PCREL), S390_XMNM_CAB, mnm, m3, r1, r2, m3, (Int)(Short)i4);   /* XMNM_CAB folds the mask into the mnemonic */
+}
+
+void   /* RIE compare-and-branch against an unsigned 8-bit immediate i2 */
+s390_format_RIE_RUPU(HChar *(*irgen)(UChar r1, UChar m3, UShort i4, UChar i2),
+ UChar r1, UChar m3, UShort i4, UChar i2)
+{
+ HChar *mnm = irgen(r1, m3, i4, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC5(XMNM, GPR, UINT, CABM, PCREL), S390_XMNM_CAB, mnm, m3, r1, i2, m3, (Int)(Short)i4);
+}
+
+void   /* RIE compare-and-branch against a signed 8-bit immediate i2 */
+s390_format_RIE_RUPI(HChar *(*irgen)(UChar r1, UChar m3, UShort i4, UChar i2),
+ UChar r1, UChar m3, UShort i4, UChar i2)
+{
+ HChar *mnm = irgen(r1, m3, i4, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC5(XMNM, GPR, INT, CABM, PCREL), S390_XMNM_CAB, mnm, m3, r1, (Int)(Char)i2, m3, (Int)(Short)i4);   /* i2 sign-extended from 8 bits */
+}
+
+void   /* RIL format, no trace output */
+s390_format_RIL(HChar *(*irgen)(UChar r1, UInt i2),
+ UChar r1, UInt i2)
+{
+ irgen(r1, i2);
+}
+
+void   /* RIL format, 32-bit i2 traced as unsigned */
+s390_format_RIL_RU(HChar *(*irgen)(UChar r1, UInt i2),
+ UChar r1, UInt i2)
+{
+ HChar *mnm = irgen(r1, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, UINT), mnm, r1, i2);
+}
+
+void   /* RIL format, 32-bit i2 traced as signed */
+s390_format_RIL_RI(HChar *(*irgen)(UChar r1, UInt i2),
+ UChar r1, UInt i2)
+{
+ HChar *mnm = irgen(r1, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, INT), mnm, r1, i2);
+}
+
+void   /* RIL format, 32-bit i2 traced as a pc-relative offset */
+s390_format_RIL_RP(HChar *(*irgen)(UChar r1, UInt i2),
+ UChar r1, UInt i2)
+{
+ HChar *mnm = irgen(r1, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, UINT, PCREL), mnm, r1, i2);
+}
+
+void   /* RIL format where the irgen ignores the operands (r1/i2 used for tracing only) */
+s390_format_RIL_UP(HChar *(*irgen)(void),
+ UChar r1, UInt i2)
+{
+ HChar *mnm = irgen();
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, UINT, PCREL), mnm, r1, i2);
+}
+
+void   /* RIS compare-and-branch: signed 8-bit i2, 4th operand is a b4+d4 address */
+s390_format_RIS_RURDI(HChar *(*irgen)(UChar r1, UChar m3, UChar i2, IRTemp op4addr),
+ UChar r1, UChar m3, UChar b4, UShort d4, UChar i2)
+{
+ HChar *mnm;
+ IRTemp op4addr = newTemp(Ity_I64);
+
+ assign(op4addr, binop(Iop_Add64, mkU64(d4), b4 != 0 ? get_gpr_dw0(b4) : mkU64(0)));   /* base register 0 contributes no base */
+
+ mnm = irgen(r1, m3, i2, op4addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC5(XMNM, GPR, INT, CABM, UDXB), S390_XMNM_CAB, mnm, m3, r1, (Int)(Char)i2, m3, d4, 0, b4);
+}
+
+void   /* RIS compare-and-branch: unsigned 8-bit i2, 4th operand is a b4+d4 address */
+s390_format_RIS_RURDU(HChar *(*irgen)(UChar r1, UChar m3, UChar i2, IRTemp op4addr),
+ UChar r1, UChar m3, UChar b4, UShort d4, UChar i2)
+{
+ HChar *mnm;
+ IRTemp op4addr = newTemp(Ity_I64);
+
+ assign(op4addr, binop(Iop_Add64, mkU64(d4), b4 != 0 ? get_gpr_dw0(b4) : mkU64(0)));
+
+ mnm = irgen(r1, m3, i2, op4addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC5(XMNM, GPR, UINT, CABM, UDXB), S390_XMNM_CAB, mnm, m3, r1, i2, m3, d4, 0, b4);
+}
+
+void   /* RR format, no trace output */
+s390_format_RR(HChar *(*irgen)(UChar r1, UChar r2),
+ UChar r1, UChar r2)
+{
+ irgen(r1, r2);
+}
+
+void   /* RR format, both operands traced as gprs */
+s390_format_RR_RR(HChar *(*irgen)(UChar r1, UChar r2),
+ UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, GPR), mnm, r1, r2);
+}
+
+void   /* RR format, both operands traced as fprs */
+s390_format_RR_FF(HChar *(*irgen)(UChar r1, UChar r2),
+ UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, FPR, FPR), mnm, r1, r2);
+}
+
+void   /* RRE format, no trace output */
+s390_format_RRE(HChar *(*irgen)(UChar r1, UChar r2),
+ UChar r1, UChar r2)
+{
+ irgen(r1, r2);
+}
+
+void   /* RRE format, gpr/gpr operands */
+s390_format_RRE_RR(HChar *(*irgen)(UChar r1, UChar r2),
+ UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, GPR), mnm, r1, r2);
+}
+
+void   /* RRE format, fpr/fpr operands */
+s390_format_RRE_FF(HChar *(*irgen)(UChar r1, UChar r2),
+ UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, FPR, FPR), mnm, r1, r2);
+}
+
+void   /* RRE format, gpr destination / fpr source */
+s390_format_RRE_RF(HChar *(*irgen)(UChar, UChar),
+ UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, FPR), mnm, r1, r2);
+}
+
+void   /* RRE format, fpr destination / gpr source */
+s390_format_RRE_FR(HChar *(*irgen)(UChar r1, UChar r2),
+ UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, FPR, GPR), mnm, r1, r2);
+}
+
+void   /* RRE format with only one gpr operand */
+s390_format_RRE_R0(HChar *(*irgen)(UChar r1),
+ UChar r1)
+{
+ HChar *mnm = irgen(r1);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(MNM, GPR), mnm, r1);
+}
+
+void   /* RRE format with only one fpr operand */
+s390_format_RRE_F0(HChar *(*irgen)(UChar r1),
+ UChar r1)
+{
+ HChar *mnm = irgen(r1);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(MNM, FPR), mnm, r1);
+}
+
+void   /* RRF format, three fpr operands */
+s390_format_RRF_F0FF(HChar *(*irgen)(UChar, UChar, UChar),
+ UChar r1, UChar r3, UChar r2)
+{
+ HChar *mnm = irgen(r1, r3, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, FPR, FPR, FPR), mnm, r1, r3, r2);
+}
+
+void   /* RRF format: irgen called (r3,r1,r2); trace prints r1 first */
+s390_format_RRF_U0RF(HChar *(*irgen)(UChar r3, UChar r1, UChar r2),
+ UChar r3, UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r3, r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, UINT, FPR), mnm, r1, r3, r2);
+}
+
+void   /* RRF format, three fpr operands, r3 decoded first */
+s390_format_RRF_F0FF2(HChar *(*irgen)(UChar, UChar, UChar),
+ UChar r3, UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r3, r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, FPR, FPR, FPR), mnm, r1, r3, r2);
+}
+
+void   /* RRF format, three gpr operands; assembler operand order is r1,r2,r3 */
+s390_format_RRF_R0RR2(HChar *(*irgen)(UChar r3, UChar r1, UChar r2),
+ UChar r3, UChar r1, UChar r2)
+{
+ HChar *mnm = irgen(r3, r1, r2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, GPR, GPR), mnm, r1, r2, r3);
+}
+
+void   /* RRS compare-and-branch: 4th operand is a b4+d4 address, m3 is the mask */
+s390_format_RRS(HChar *(*irgen)(UChar r1, UChar r2, UChar m3, IRTemp op4addr),
+ UChar r1, UChar r2, UChar b4, UShort d4, UChar m3)
+{
+ HChar *mnm;
+ IRTemp op4addr = newTemp(Ity_I64);
+
+ assign(op4addr, binop(Iop_Add64, mkU64(d4), b4 != 0 ? get_gpr_dw0(b4) : mkU64(0)));   /* base register 0 contributes no base */
+
+ mnm = irgen(r1, r2, m3, op4addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC5(XMNM, GPR, GPR, CABM, UDXB), S390_XMNM_CAB, mnm, m3, r1, r2, m3, d4, 0, b4);
+}
+
+void   /* RS format with a single register operand */
+s390_format_RS_R0RD(HChar *(*irgen)(UChar r1, IRTemp op2addr),
+ UChar r1, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(r1, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, UDXB), mnm, r1, d2, 0, b2);
+}
+
+void   /* RS format, r1/r3 both traced as gprs */
+s390_format_RS_RRRD(HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+ UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(r1, r3, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, GPR, UDXB), mnm, r1, r3, d2, 0, b2);
+}
+
+void   /* RS format, r3 traced as an unsigned immediate (e.g. mask fields) */
+s390_format_RS_RURD(HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+ UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(r1, r3, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, UINT, UDXB), mnm, r1, r3, d2, 0, b2);
+}
+
+void   /* RS format on access registers (AR) */
+s390_format_RS_AARD(HChar *(*irgen)(UChar, UChar, IRTemp),
+ UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(r1, r3, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, AR, AR, UDXB), mnm, r1, r3, d2, 0, b2);
+}
+
+void   /* RSI format: two gprs plus a pc-relative offset */
+s390_format_RSI_RRP(HChar *(*irgen)(UChar r1, UChar r3, UShort i2),
+ UChar r1, UChar r3, UShort i2)
+{
+ HChar *mnm = irgen(r1, r3, i2);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, GPR, PCREL), mnm, r1, r3, (Int)(Short)i2);
+}
+
+void   /* RSY format: 20-bit signed displacement split into dl2 (low 12) / dh2 (high 8) */
+s390_format_RSY_RRRD(HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+ UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+ IRTemp d2 = newTemp(Ity_I64);
+
+ assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));   /* sign-extend dh2, then splice in the low 12 bits */
+ assign(op2addr, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(r1, r3, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, GPR, SDXB), mnm, r1, r3, dh2, dl2, 0, b2);
+}
+
+void   /* RSY format on access registers */
+s390_format_RSY_AARD(HChar *(*irgen)(UChar, UChar, IRTemp),
+ UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+ IRTemp d2 = newTemp(Ity_I64);
+
+ assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+ assign(op2addr, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(r1, r3, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, AR, AR, SDXB), mnm, r1, r3, dh2, dl2, 0, b2);
+}
+
+void   /* RSY format, r3 traced as an unsigned immediate */
+s390_format_RSY_RURD(HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+ UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+ IRTemp d2 = newTemp(Ity_I64);
+
+ assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+ assign(op2addr, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(r1, r3, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, GPR, UINT, SDXB), mnm, r1, r3, dh2, dl2, 0, b2);
+}
+
+void   /* RX format: address = d2 + base(b2) + index(x2); raw fields passed through */
+s390_format_RX(HChar *(*irgen)(UChar r1, UChar x2, UChar b2, UShort d2, IRTemp op2addr),
+ UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));   /* register 0 as base or index contributes nothing */
+
+ irgen(r1, x2, b2, d2, op2addr);
+}
+
+void   /* RX format, r1 is a gpr */
+s390_format_RX_RRRD(HChar *(*irgen)(UChar r1, IRTemp op2addr),
+ UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));
+
+ mnm = irgen(r1, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, UDXB), mnm, r1, d2, x2, b2);
+}
+
+void   /* RX format, r1 is an fpr */
+s390_format_RX_FRRD(HChar *(*irgen)(UChar r1, IRTemp op2addr),
+ UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));
+
+ mnm = irgen(r1, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, FPR, UDXB), mnm, r1, d2, x2, b2);
+}
+
+void   /* RXE format, r1 is an fpr; same addressing as RX */
+s390_format_RXE_FRRD(HChar *(*irgen)(UChar r1, IRTemp op2addr),
+ UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));
+
+ mnm = irgen(r1, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, FPR, UDXB), mnm, r1, d2, x2, b2);
+}
+
+void   /* RXF format: irgen called (r3, addr, r1); trace prints r1 first */
+s390_format_RXF_FRRDF(HChar *(*irgen)(UChar, IRTemp, UChar),
+ UChar r3, UChar x2, UChar b2, UShort d2, UChar r1)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));
+
+ mnm = irgen(r3, op2addr, r1);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC4(MNM, FPR, FPR, UDXB), mnm, r1, r3, d2, x2, b2);
+}
+
+void   /* RXY format: RX addressing with a 20-bit signed displacement (dl2|dh2) */
+s390_format_RXY_RRRD(HChar *(*irgen)(UChar r1, IRTemp op2addr),
+ UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+ IRTemp d2 = newTemp(Ity_I64);
+
+ assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));   /* sign-extend dh2, splice in the low 12 bits */
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));
+
+ mnm = irgen(r1, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, GPR, SDXB), mnm, r1, dh2, dl2, x2, b2);
+}
+
+void   /* RXY format, r1 is an fpr */
+s390_format_RXY_FRRD(HChar *(*irgen)(UChar r1, IRTemp op2addr),
+ UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+ IRTemp d2 = newTemp(Ity_I64);
+
+ assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));
+
+ mnm = irgen(r1, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, FPR, SDXB), mnm, r1, dh2, dl2, x2, b2);
+}
+
+void   /* RXY format where the irgen ignores all operands (fields kept for tracing) */
+s390_format_RXY_URRD(HChar *(*irgen)(void),
+ UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+ IRTemp d2 = newTemp(Ity_I64);
+
+ assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+ assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) : mkU64(0)));
+
+ mnm = irgen();
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, UINT, SDXB), mnm, r1, dh2, dl2, x2, b2);
+}
+
+void   /* S format: single storage operand at b2+d2 */
+s390_format_S_RD(HChar *(*irgen)(IRTemp op2addr),
+ UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));   /* base register 0 contributes no base */
+
+ mnm = irgen(op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(MNM, UDXB), mnm, d2, 0, b2);
+}
+
+void   /* SI format: 8-bit immediate against storage operand b1+d1 */
+s390_format_SI_URD(HChar *(*irgen)(UChar i2, IRTemp op1addr),
+ UChar i2, UChar b1, UShort d1)
+{
+ HChar *mnm;
+ IRTemp op1addr = newTemp(Ity_I64);
+
+ assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) : mkU64(0)));
+
+ mnm = irgen(i2, op1addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, UDXB, UINT), mnm, d1, 0, b1, i2);
+}
+
+void   /* SIY format: SI with a 20-bit signed displacement; i2 traced unsigned */
+s390_format_SIY_URD(HChar *(*irgen)(UChar i2, IRTemp op1addr),
+ UChar i2, UChar b1, UShort dl1, UChar dh1)
+{
+ HChar *mnm;
+ IRTemp op1addr = newTemp(Ity_I64);
+ IRTemp d1 = newTemp(Ity_I64);
+
+ assign(d1, mkU64(((ULong)(Long)(Char)dh1 << 12) | ((ULong)dl1)));   /* sign-extend dh1, splice in the low 12 bits */
+ assign(op1addr, binop(Iop_Add64, mkexpr(d1), b1 != 0 ? get_gpr_dw0(b1) : mkU64(0)));
+
+ mnm = irgen(i2, op1addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, SDXB, UINT), mnm, dh1, dl1, 0, b1, i2);
+}
+
+void   /* SIY format, i2 traced as a signed 8-bit integer */
+s390_format_SIY_IRD(HChar *(*irgen)(UChar i2, IRTemp op1addr),
+ UChar i2, UChar b1, UShort dl1, UChar dh1)
+{
+ HChar *mnm;
+ IRTemp op1addr = newTemp(Ity_I64);
+ IRTemp d1 = newTemp(Ity_I64);
+
+ assign(d1, mkU64(((ULong)(Long)(Char)dh1 << 12) | ((ULong)dl1)));
+ assign(op1addr, binop(Iop_Add64, mkexpr(d1), b1 != 0 ? get_gpr_dw0(b1) : mkU64(0)));
+
+ mnm = irgen(i2, op1addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, SDXB, INT), mnm, dh1, dl1, 0, b1, (Int)(Char)i2);
+}
+
+void   /* SS format: length byte l plus two storage operands (b1+d1, b2+d2) */
+s390_format_SS_L0RDRD(HChar *(*irgen)(UChar, IRTemp, IRTemp),
+ UChar l, UChar b1, UShort d1, UChar b2, UShort d2)
+{
+ HChar *mnm;
+ IRTemp op1addr = newTemp(Ity_I64);
+ IRTemp op2addr = newTemp(Ity_I64);
+
+ assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) : mkU64(0)));   /* base register 0 contributes no base */
+ assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)));
+
+ mnm = irgen(l, op1addr, op2addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, UDLB, UDXB), mnm, d1, l, b1, d2, 0, b2);
+}
+
+void   /* SIL format: 16-bit immediate against storage operand; i2 traced signed */
+s390_format_SIL_RDI(HChar *(*irgen)(UShort i2, IRTemp op1addr),
+ UChar b1, UShort d1, UShort i2)
+{
+ HChar *mnm;
+ IRTemp op1addr = newTemp(Ity_I64);
+
+ assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) : mkU64(0)));
+
+ mnm = irgen(i2, op1addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, UDXB, INT), mnm, d1, 0, b1, (Int)(Short)i2);
+}
+
+void   /* SIL format, i2 traced as an unsigned 16-bit immediate */
+s390_format_SIL_RDU(HChar *(*irgen)(UShort i2, IRTemp op1addr),
+ UChar b1, UShort d1, UShort i2)
+{
+ HChar *mnm;
+ IRTemp op1addr = newTemp(Ity_I64);
+
+ assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) : mkU64(0)));
+
+ mnm = irgen(i2, op1addr);
+
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, UDXB, UINT), mnm, d1, 0, b1, i2);
+}
+
+
+
+/*------------------------------------------------------------*/
+/*--- Build IR for opcodes ---*/
+/*------------------------------------------------------------*/
+
+HChar *   /* AR: 32-bit signed add r2 into r1; cc from signed-add thunk */
+s390_irgen_AR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));   /* w1: 32-bit view of the gpr (presumably the low word) */
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);   /* cc computed lazily from the two operands */
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ar";
+}
+
+HChar *   /* AGR: 64-bit signed add r2 into r1 */
+s390_irgen_AGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "agr";
+}
+
+HChar *   /* AGFR: add 32-bit r2, sign-extended to 64 bits, into 64-bit r1 */
+s390_irgen_AGFR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));   /* sign-extend the 32-bit operand */
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "agfr";
+}
+
+HChar *   /* ARK: three-operand 32-bit signed add, r1 = r2 + r3 */
+s390_irgen_ARK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ark";
+}
+
+HChar *   /* AGRK: three-operand 64-bit signed add, r1 = r2 + r3 */
+s390_irgen_AGRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Add64, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op2, op3);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "agrk";
+}
+
+HChar *   /* A: 32-bit signed add of a word loaded from op2addr */
+s390_irgen_A(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "a";
+}
+
+HChar *   /* AY: same as A, long-displacement encoding */
+s390_irgen_AY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ay";
+}
+
+HChar *   /* AG: 64-bit signed add of a doubleword loaded from op2addr */
+s390_irgen_AG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "ag";
+}
+
+HChar *   /* AGF: 64-bit add of a sign-extended 32-bit memory operand */
+s390_irgen_AGF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "agf";
+}
+
+HChar *   /* AFI: 32-bit add of a signed 32-bit immediate */
+s390_irgen_AFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Int)i2;
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32, mkU32((UInt)op2)));   /* immediate wrapped in a temp for the thunk */
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "afi";
+}
+
+HChar *   /* AGFI: 64-bit add of a sign-extended 32-bit immediate */
+s390_irgen_AGFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Long)(Int)i2;
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkU64((ULong)op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, mktemp(Ity_I64, mkU64((ULong)op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "agfi";
+}
+
+HChar *   /* AHIK: three-operand 32-bit add, r1 = r3 + sign-extended 16-bit i2 */
+s390_irgen_AHIK(UChar r1, UChar r3, UShort i2)
+{
+ Int op2;
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ op2 = (Int)(Short)i2;   /* sign-extend the 16-bit immediate */
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Add32, mkU32((UInt)op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, mktemp(Ity_I32, mkU32((UInt)op2)), op3);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ahik";
+}
+
+HChar *   /* AGHIK: three-operand 64-bit add, r1 = r3 + sign-extended 16-bit i2 */
+s390_irgen_AGHIK(UChar r1, UChar r3, UShort i2)
+{
+ Long op2;
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ op2 = (Long)(Short)i2;
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Add64, mkU64((ULong)op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, mktemp(Ity_I64, mkU64((ULong)op2)), op3);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "aghik";
+}
+
+HChar *   /* ASI: add sign-extended 8-bit i2 to the 32-bit word at op1addr (read-modify-write) */
+s390_irgen_ASI(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, load(Ity_I32, mkexpr(op1addr)));
+ op2 = (Int)(Char)i2;   /* sign-extend the 8-bit immediate */
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+ store(mkexpr(op1addr), mkexpr(result));   /* result written back to memory, not a gpr */
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32, mkU32((UInt)op2)));
+
+ return "asi";
+}
+
+HChar *   /* AGSI: add sign-extended 8-bit i2 to the 64-bit doubleword at op1addr */
+s390_irgen_AGSI(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, load(Ity_I64, mkexpr(op1addr)));
+ op2 = (Long)(Char)i2;
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkU64((ULong)op2)));
+ store(mkexpr(op1addr), mkexpr(result));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, mktemp(Ity_I64, mkU64((ULong)op2)));
+
+ return "agsi";
+}
+
+HChar *   /* AH: 32-bit add of a sign-extended halfword from memory */
+s390_irgen_AH(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));   /* halfword sign-extended to 32 bits */
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ah";
+}
+
+HChar *   /* AHY: same as AH, long-displacement encoding */
+s390_irgen_AHY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ahy";
+}
+
+HChar *   /* AHI: 32-bit add of a sign-extended 16-bit immediate */
+s390_irgen_AHI(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Int)(Short)i2;
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32, mkU32((UInt)op2)));
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ahi";
+}
+
+HChar *   /* AGHI: 64-bit add of a sign-extended 16-bit immediate */
+s390_irgen_AGHI(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Long)(Short)i2;
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkU64((ULong)op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, mktemp(Ity_I64, mkU64((ULong)op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "aghi";
+}
+
+HChar *   /* AHHHR: 32-bit signed add on the w0 (high-word) halves of the gprs */
+s390_irgen_AHHHR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r2));   /* w0: the other 32-bit word of the gpr */
+ assign(op3, get_gpr_w0(r3));
+ assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "ahhhr";
+}
+
+HChar *   /* AHHLR: add w1 of r3 to w0 of r2, result into w0 of r1 */
+s390_irgen_AHHLR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "ahhlr";
+}
+
+HChar *   /* AIH: add 32-bit immediate to w0 of r1 */
+s390_irgen_AIH(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ op2 = (Int)i2;
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32, mkU32((UInt)op2)));
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "aih";
+}
+
+HChar *   /* ALR: 32-bit unsigned (logical) add; cc from unsigned-add thunk */
+s390_irgen_ALR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, op2);   /* ZZ thunk = operands treated as unsigned */
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "alr";
+}
+
+HChar *   /* ALGR: 64-bit unsigned add */
+s390_irgen_ALGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "algr";
+}
+
+HChar *   /* ALGFR: 64-bit unsigned add of zero-extended 32-bit r2 */
+s390_irgen_ALGFR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Uto64, get_gpr_w1(r2)));   /* zero-extend for the logical variant */
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "algfr";
+}
+
+HChar *   /* ALRK: three-operand 32-bit unsigned add, r1 = r2 + r3 */
+s390_irgen_ALRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "alrk";
+}
+
+HChar *   /* ALGRK: three-operand 64-bit unsigned add, r1 = r2 + r3 */
+s390_irgen_ALGRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Add64, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op2, op3);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "algrk";
+}
+
+HChar *   /* AL: 32-bit unsigned add of a word from memory */
+s390_irgen_AL(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "al";
+}
+
+HChar *   /* ALY: same as AL, long-displacement encoding */
+s390_irgen_ALY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "aly";
+}
+
+HChar *   /* ALG: 64-bit unsigned add of a doubleword from memory */
+s390_irgen_ALG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "alg";
+}
+
+HChar *   /* ALGF: 64-bit unsigned add of a zero-extended 32-bit memory operand */
+s390_irgen_ALGF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "algf";
+}
+
+HChar *   /* ALFI: 32-bit unsigned add of a 32-bit immediate */
+s390_irgen_ALFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, mktemp(Ity_I32, mkU32(op2)));   /* immediate wrapped in a temp for the thunk */
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "alfi";
+}
+
+HChar *   /* ALGFI: 64-bit unsigned add of a zero-extended 32-bit immediate */
+s390_irgen_ALGFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ ULong op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (ULong)i2;   /* zero-extend for the logical variant */
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkU64(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, mktemp(Ity_I64, mkU64(op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "algfi";
+}
+
+HChar *   /* ALHHHR: 32-bit unsigned add on the w0 (high-word) halves */
+s390_irgen_ALHHHR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r2));
+ assign(op3, get_gpr_w0(r3));
+ assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "alhhhr";
+}
+
+HChar *   /* ALHHLR: unsigned add of w1 of r3 to w0 of r2, result into w0 of r1 */
+s390_irgen_ALHHLR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "alhhlr";
+}
+
+HChar *   /* ALCR: 32-bit add-with-carry; carry comes from the previous cc */
+s390_irgen_ALCR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp carry_in = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(carry_in, binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)));   /* carry = cc >> 1 (cc values 2/3 carry) */
+ assign(result, binop(Iop_Add32, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)), mkexpr(carry_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_32, op1, op2, carry_in);   /* three-operand thunk includes the carry */
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "alcr";
+}
+
+HChar *   /* ALCGR: 64-bit add-with-carry */
+s390_irgen_ALCGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp carry_in = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(carry_in, unop(Iop_32Uto64, binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1))));   /* cc is 32-bit; widen the carry to 64 */
+ assign(result, binop(Iop_Add64, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)), mkexpr(carry_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_64, op1, op2, carry_in);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "alcgr";
+}
+
+HChar *   /* ALC: 32-bit add-with-carry of a word from memory */
+s390_irgen_ALC(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp carry_in = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(carry_in, binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)));
+ assign(result, binop(Iop_Add32, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)), mkexpr(carry_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_32, op1, op2, carry_in);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "alc";
+}
+
+HChar *
+s390_irgen_ALCG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp carry_in = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(carry_in, unop(Iop_32Uto64, binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1))));
+ assign(result, binop(Iop_Add64, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)), mkexpr(carry_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_64, op1, op2, carry_in);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "alcg";
+}
+
+HChar *
+s390_irgen_ALSI(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, load(Ity_I32, mkexpr(op1addr)));
+ op2 = (UInt)(Int)(Char)i2;
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, mktemp(Ity_I32, mkU32(op2)));
+ store(mkexpr(op1addr), mkexpr(result));
+
+ return "alsi";
+}
+
+HChar *
+s390_irgen_ALGSI(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ ULong op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, load(Ity_I64, mkexpr(op1addr)));
+ op2 = (ULong)(Long)(Char)i2;
+ assign(result, binop(Iop_Add64, mkexpr(op1), mkU64(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, mktemp(Ity_I64, mkU64(op2)));
+ store(mkexpr(op1addr), mkexpr(result));
+
+ return "algsi";
+}
+
+HChar *
+s390_irgen_ALHSIK(UChar r1, UChar r3, UShort i2)
+{
+ UInt op2;
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ op2 = (UInt)(Int)(Short)i2;
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Add32, mkU32(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, mktemp(Ity_I32, mkU32(op2)), op3);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "alhsik";
+}
+
+HChar *
+s390_irgen_ALGHSIK(UChar r1, UChar r3, UShort i2)
+{
+ ULong op2;
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ op2 = (ULong)(Long)(Short)i2;
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Add64, mkU64(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, mktemp(Ity_I64, mkU64(op2)), op3);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "alghsik";
+}
+
+HChar *
+s390_irgen_ALSIH(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, mktemp(Ity_I32, mkU32(op2)));
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "alsih";
+}
+
+HChar *
+s390_irgen_ALSIHN(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "alsihn";
+}
+
+/* --------------------------- AND instructions -------------------------- */
+/* Each variant ANDs the operands, latches only the result in the CC      */
+/* thunk (S390_CC_OP_BITWISE is a result-based CC computation), and       */
+/* writes the result back.                                                */
+
+/* NR: 32-bit AND of low words, r1 &= r2. */
+HChar *
+s390_irgen_NR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_And32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "nr";
+}
+
+/* NGR: 64-bit AND, r1 &= r2. */
+HChar *
+s390_irgen_NGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_And64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "ngr";
+}
+
+/* NRK: 32-bit AND, distinct result register: r1 = r2 & r3. */
+HChar *
+s390_irgen_NRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_And32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "nrk";
+}
+
+/* NGRK: 64-bit AND, distinct result register: r1 = r2 & r3. */
+HChar *
+s390_irgen_NGRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_And64, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "ngrk";
+}
+
+/* N: 32-bit AND with storage operand. */
+HChar *
+s390_irgen_N(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_And32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "n";
+}
+
+/* NY: long-displacement form of N; identical semantics. */
+HChar *
+s390_irgen_NY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_And32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ny";
+}
+
+/* NG: 64-bit AND with storage operand. */
+HChar *
+s390_irgen_NG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_And64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "ng";
+}
+
+/* NI: AND immediate byte into a storage byte (read-modify-write). */
+HChar *
+s390_irgen_NI(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I8);
+ UChar op2;
+ IRTemp result = newTemp(Ity_I8);
+
+ assign(op1, load(Ity_I8, mkexpr(op1addr)));
+ op2 = i2;
+ assign(result, binop(Iop_And8, mkexpr(op1), mkU8(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ store(mkexpr(op1addr), mkexpr(result));
+
+ return "ni";
+}
+
+/* NIY: long-displacement form of NI; identical semantics. */
+HChar *
+s390_irgen_NIY(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I8);
+ UChar op2;
+ IRTemp result = newTemp(Ity_I8);
+
+ assign(op1, load(Ity_I8, mkexpr(op1addr)));
+ op2 = i2;
+ assign(result, binop(Iop_And8, mkexpr(op1), mkU8(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ store(mkexpr(op1addr), mkexpr(result));
+
+ return "niy";
+}
+
+/* NIHF: AND 32-bit immediate into the high word of r1. */
+HChar *
+s390_irgen_NIHF(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ op2 = i2;
+ assign(result, binop(Iop_And32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "nihf";
+}
+
+/* NIHH: AND halfword immediate into halfword 0 of r1. */
+HChar *
+s390_irgen_NIHH(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw0(r1));
+ op2 = i2;
+ assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw0(r1, mkexpr(result));
+
+ return "nihh";
+}
+
+/* NIHL: AND halfword immediate into halfword 1 of r1. */
+HChar *
+s390_irgen_NIHL(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw1(r1));
+ op2 = i2;
+ assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw1(r1, mkexpr(result));
+
+ return "nihl";
+}
+
+/* NILF: AND 32-bit immediate into the low word of r1. */
+HChar *
+s390_irgen_NILF(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = i2;
+ assign(result, binop(Iop_And32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "nilf";
+}
+
+/* NILH: AND halfword immediate into halfword 2 of r1. */
+HChar *
+s390_irgen_NILH(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw2(r1));
+ op2 = i2;
+ assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw2(r1, mkexpr(result));
+
+ return "nilh";
+}
+
+/* NILL: AND halfword immediate into halfword 3 of r1. */
+HChar *
+s390_irgen_NILL(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw3(r1));
+ op2 = i2;
+ assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw3(r1, mkexpr(result));
+
+ return "nill";
+}
+
+/* ------------------- branch / call / count instructions ---------------- */
+
+/* BASR: branch and save.  Save the next-instruction address (insn is 2
+   bytes) in r1, then branch to the address in r2.  r2 == 0 means "save
+   only, no branch". */
+HChar *
+s390_irgen_BASR(UChar r1, UChar r2)
+{
+ IRTemp target = newTemp(Ity_I64);
+
+ if (r2 == 0) {
+ put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 2ULL));
+ } else {
+ if (r1 != r2) {
+ put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 2ULL));
+ call_function(get_gpr_dw0(r2));
+ } else {
+ /* r1 == r2: capture the target BEFORE overwriting the register. */
+ assign(target, get_gpr_dw0(r2));
+ put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 2ULL));
+ call_function(mkexpr(target));
+ }
+ }
+
+ return "basr";
+}
+
+/* BAS: branch and save, target from effective address (insn is 4 bytes). */
+HChar *
+s390_irgen_BAS(UChar r1, IRTemp op2addr)
+{
+ IRTemp target = newTemp(Ity_I64);
+
+ put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 4ULL));
+ assign(target, mkexpr(op2addr));
+ call_function(mkexpr(target));
+
+ return "bas";
+}
+
+/* BCR: branch on condition to address in r2.  r1 is the 4-bit condition
+   mask: 0 (or r2 == 0) is a no-op, 15 is unconditional and is modelled
+   as a function return; otherwise branch iff the mask selects the
+   current CC. */
+HChar *
+s390_irgen_BCR(UChar r1, UChar r2)
+{
+ IRTemp cond = newTemp(Ity_I32);
+
+ if ((r2 == 0) || (r1 == 0)) {
+ } else {
+ if (r1 == 15) {
+ return_from_function(get_gpr_dw0(r2));
+ } else {
+ assign(cond, s390_call_calculate_cond(r1));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), get_gpr_dw0(r2));
+ }
+ }
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(XMNM, GPR), S390_XMNM_BCR, r1, r2);
+
+ return "bcr";
+}
+
+/* BC: branch on condition to the computed effective address; same mask
+   conventions as BCR. */
+HChar *
+s390_irgen_BC(UChar r1, UChar x2, UChar b2, UShort d2, IRTemp op2addr)
+{
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (r1 == 0) {
+ } else {
+ if (r1 == 15) {
+ always_goto(mkexpr(op2addr));
+ } else {
+ assign(cond, s390_call_calculate_cond(r1));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op2addr));
+ }
+ }
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(XMNM, UDXB), S390_XMNM_BC, r1, d2, x2, b2);
+
+ return "bc";
+}
+
+/* BCTR: branch on count.  Decrement the low word of r1; branch to r2 if
+   the result is non-zero (r2 == 0 means no branch). */
+HChar *
+s390_irgen_BCTR(UChar r1, UChar r2)
+{
+ put_gpr_w1(r1, binop(Iop_Sub32, get_gpr_w1(r1), mkU32(1)));
+ if (r2 != 0) {
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, get_gpr_w1(r1), mkU32(0)), get_gpr_dw0(r2));
+ }
+
+ return "bctr";
+}
+
+/* BCTGR: 64-bit branch on count (see BCTR). */
+HChar *
+s390_irgen_BCTGR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, binop(Iop_Sub64, get_gpr_dw0(r1), mkU64(1)));
+ if (r2 != 0) {
+ if_not_condition_goto_computed(binop(Iop_CmpEQ64, get_gpr_dw0(r1), mkU64(0)), get_gpr_dw0(r2));
+ }
+
+ return "bctgr";
+}
+
+/* BCT: branch on count to the computed effective address, 32-bit. */
+HChar *
+s390_irgen_BCT(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, binop(Iop_Sub32, get_gpr_w1(r1), mkU32(1)));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, get_gpr_w1(r1), mkU32(0)), mkexpr(op2addr));
+
+ return "bct";
+}
+
+/* BCTG: branch on count to the computed effective address, 64-bit. */
+HChar *
+s390_irgen_BCTG(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, binop(Iop_Sub64, get_gpr_dw0(r1), mkU64(1)));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ64, get_gpr_dw0(r1), mkU64(0)), mkexpr(op2addr));
+
+ return "bctg";
+}
+
+/* ----------------- branch-on-index / relative branches ----------------- */
+/* For the BX*/BRX* insns r3 holds the increment and the odd register of  */
+/* the pair (r3 | 1) holds the comparand.  Relative targets are computed  */
+/* as current address + sign-extended immediate * 2 (offsets count        */
+/* halfwords).                                                            */
+
+/* BXH: r1 += r3; branch if r1 > comparand (signed), i.e. unless
+   r1 <= comparand. */
+HChar *
+s390_irgen_BXH(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_I32);
+
+ assign(value, get_gpr_w1(r3 | 1));
+ put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+ if_not_condition_goto_computed(binop(Iop_CmpLE32S, get_gpr_w1(r1), mkexpr(value)), mkexpr(op2addr));
+
+ return "bxh";
+}
+
+/* BXHG: 64-bit BXH. */
+HChar *
+s390_irgen_BXHG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_I64);
+
+ assign(value, get_gpr_dw0(r3 | 1));
+ put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+ if_not_condition_goto_computed(binop(Iop_CmpLE64S, get_gpr_dw0(r1), mkexpr(value)), mkexpr(op2addr));
+
+ return "bxhg";
+}
+
+/* BXLE: r1 += r3; branch if r1 <= comparand (signed), i.e. unless
+   comparand < r1. */
+HChar *
+s390_irgen_BXLE(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_I32);
+
+ assign(value, get_gpr_w1(r3 | 1));
+ put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+ if_not_condition_goto_computed(binop(Iop_CmpLT32S, mkexpr(value), get_gpr_w1(r1)), mkexpr(op2addr));
+
+ return "bxle";
+}
+
+/* BXLEG: 64-bit BXLE. */
+HChar *
+s390_irgen_BXLEG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_I64);
+
+ assign(value, get_gpr_dw0(r3 | 1));
+ put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+ if_not_condition_goto_computed(binop(Iop_CmpLT64S, mkexpr(value), get_gpr_dw0(r1)), mkexpr(op2addr));
+
+ return "bxleg";
+}
+
+/* BRAS: branch relative and save; save next-insn address (insn is 4
+   bytes), call relative target. */
+HChar *
+s390_irgen_BRAS(UChar r1, UShort i2)
+{
+ put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 4ULL));
+ call_function(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1)));
+
+ return "bras";
+}
+
+/* BRASL: branch relative and save long; 32-bit offset, insn is 6 bytes. */
+HChar *
+s390_irgen_BRASL(UChar r1, UInt i2)
+{
+ put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 6ULL));
+ call_function(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)));
+
+ return "brasl";
+}
+
+/* BRC: branch relative on condition; r1 is the condition mask (0 = nop,
+   15 = unconditional). */
+HChar *
+s390_irgen_BRC(UChar r1, UShort i2)
+{
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (r1 == 0) {
+ } else {
+ if (r1 == 15) {
+ always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1)));
+ } else {
+ assign(cond, s390_call_calculate_cond(r1));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+ }
+ }
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(XMNM, PCREL), S390_XMNM_BRC, r1, (Int)(Short)i2);
+
+ return "brc";
+}
+
+/* BRCL: branch relative on condition long; 32-bit offset. */
+HChar *
+s390_irgen_BRCL(UChar r1, UInt i2)
+{
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (r1 == 0) {
+ } else {
+ if (r1 == 15) {
+ always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)));
+ } else {
+ assign(cond, s390_call_calculate_cond(r1));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1));
+ }
+ }
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC2(XMNM, PCREL), S390_XMNM_BRCL, r1, i2);
+
+ return "brcl";
+}
+
+/* BRCT: decrement low word of r1; branch relative if non-zero. */
+HChar *
+s390_irgen_BRCT(UChar r1, UShort i2)
+{
+ put_gpr_w1(r1, binop(Iop_Sub32, get_gpr_w1(r1), mkU32(1)));
+ if_condition_goto(binop(Iop_CmpNE32, get_gpr_w1(r1), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+ return "brct";
+}
+
+/* BRCTG: 64-bit BRCT. */
+HChar *
+s390_irgen_BRCTG(UChar r1, UShort i2)
+{
+ put_gpr_dw0(r1, binop(Iop_Sub64, get_gpr_dw0(r1), mkU64(1)));
+ if_condition_goto(binop(Iop_CmpNE64, get_gpr_dw0(r1), mkU64(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+ return "brctg";
+}
+
+/* BRXH: relative BXH: r1 += r3; branch if comparand < r1 (signed). */
+HChar *
+s390_irgen_BRXH(UChar r1, UChar r3, UShort i2)
+{
+ IRTemp value = newTemp(Ity_I32);
+
+ assign(value, get_gpr_w1(r3 | 1));
+ put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+ if_condition_goto(binop(Iop_CmpLT32S, mkexpr(value), get_gpr_w1(r1)), guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+ return "brxh";
+}
+
+/* BRXHG: 64-bit BRXH. */
+HChar *
+s390_irgen_BRXHG(UChar r1, UChar r3, UShort i2)
+{
+ IRTemp value = newTemp(Ity_I64);
+
+ assign(value, get_gpr_dw0(r3 | 1));
+ put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+ if_condition_goto(binop(Iop_CmpLT64S, mkexpr(value), get_gpr_dw0(r1)), guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+ return "brxhg";
+}
+
+/* BRXLE: relative BXLE: r1 += r3; branch if r1 <= comparand (signed). */
+HChar *
+s390_irgen_BRXLE(UChar r1, UChar r3, UShort i2)
+{
+ IRTemp value = newTemp(Ity_I32);
+
+ assign(value, get_gpr_w1(r3 | 1));
+ put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+ if_condition_goto(binop(Iop_CmpLE32S, get_gpr_w1(r1), mkexpr(value)), guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+ return "brxle";
+}
+
+/* BRXLG: 64-bit BRXLE. */
+HChar *
+s390_irgen_BRXLG(UChar r1, UChar r3, UShort i2)
+{
+ IRTemp value = newTemp(Ity_I64);
+
+ assign(value, get_gpr_dw0(r3 | 1));
+ put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+ if_condition_goto(binop(Iop_CmpLE64S, get_gpr_dw0(r1), mkexpr(value)), guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+ return "brxlg";
+}
+
+/* ------------------------- signed compares ----------------------------- */
+/* Compares only set the condition code; no register or storage result.   */
+/* The operands are handed to the thunk (signed variant) for lazy CC      */
+/* computation.                                                           */
+
+/* CR: signed compare of low words of r1 and r2. */
+HChar *
+s390_irgen_CR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cr";
+}
+
+/* CGR: 64-bit signed compare. */
+HChar *
+s390_irgen_CGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cgr";
+}
+
+/* CGFR: signed compare of 64-bit r1 with sign-extended low word of r2. */
+HChar *
+s390_irgen_CGFR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cgfr";
+}
+
+/* C: signed compare with 32-bit storage operand. */
+HChar *
+s390_irgen_C(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "c";
+}
+
+/* CY: long-displacement form of C; identical semantics. */
+HChar *
+s390_irgen_CY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cy";
+}
+
+/* CG: signed compare with 64-bit storage operand. */
+HChar *
+s390_irgen_CG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cg";
+}
+
+/* CGF: signed compare of 64-bit r1 with sign-extended 32-bit storage
+   operand. */
+HChar *
+s390_irgen_CGF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cgf";
+}
+
+/* CFI: signed compare with 32-bit immediate. */
+HChar *
+s390_irgen_CFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Int)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32((UInt)op2)));
+
+ return "cfi";
+}
+
+/* CGFI: 64-bit signed compare with sign-extended 32-bit immediate. */
+HChar *
+s390_irgen_CGFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Long)(Int)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64((ULong)op2)));
+
+ return "cgfi";
+}
+
+/* CRL: signed compare with a PC-relative 32-bit storage operand
+   (address = current insn + sign-extended i2 * 2). */
+HChar *
+s390_irgen_CRL(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "crl";
+}
+
+/* CGRL: 64-bit PC-relative signed compare. */
+HChar *
+s390_irgen_CGRL(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cgrl";
+}
+
+/* CGFRL: 64-bit signed compare with sign-extended PC-relative 32-bit
+   storage operand. */
+HChar *
+s390_irgen_CGFRL(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cgfrl";
+}
+
+/* --------------------- compare and branch insns ------------------------ */
+/* m3 is a 4-bit mask over the compare outcome (icc 0/1/2 = equal/low/    */
+/* high; a compare never yields cc 3).  m3 == 0 is a nop; m3 == 14 has    */
+/* bits set for all three possible outcomes, hence an unconditional       */
+/* branch.  Otherwise "(m3 << icc) & 8" tests the mask bit selected by    */
+/* the computed icc.                                                      */
+
+/* CRB: 32-bit signed compare r1:r2, branch to op4addr on mask match. */
+HChar *
+s390_irgen_CRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkexpr(op4addr));
+ } else {
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, op2));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+ }
+ }
+
+ return "crb";
+}
+
+/* CGRB: 64-bit CRB. */
+HChar *
+s390_irgen_CGRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkexpr(op4addr));
+ } else {
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, op2));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+ }
+ }
+
+ return "cgrb";
+}
+
+/* CRJ: 32-bit signed compare r1:r2, relative branch on mask match. */
+HChar *
+s390_irgen_CRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+ } else {
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, op2));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ }
+ }
+
+ return "crj";
+}
+
+/* CGRJ: 64-bit CRJ. */
+HChar *
+s390_irgen_CGRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+ } else {
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, op2));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ }
+ }
+
+ return "cgrj";
+}
+
+/* CIB: 32-bit signed compare with sign-extended byte immediate, branch
+   to op4addr on mask match. */
+HChar *
+s390_irgen_CIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkexpr(op4addr));
+ } else {
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Int)(Char)i2;
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32((UInt)op2))));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+ }
+ }
+
+ return "cib";
+}
+
+/* CGIB: 64-bit CIB. */
+HChar *
+s390_irgen_CGIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkexpr(op4addr));
+ } else {
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Long)(Char)i2;
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64((ULong)op2))));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+ }
+ }
+
+ return "cgib";
+}
+
+/* CIJ: 32-bit signed compare with sign-extended byte immediate, relative
+   branch on mask match. */
+HChar *
+s390_irgen_CIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+ } else {
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Int)(Char)i2;
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32((UInt)op2))));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ }
+ }
+
+ return "cij";
+}
+
+/* CGIJ: 64-bit CIJ. */
+HChar *
+s390_irgen_CGIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+ IRTemp icc = newTemp(Ity_I32);
+ IRTemp cond = newTemp(Ity_I32);
+
+ if (m3 == 0) {
+ } else {
+ if (m3 == 14) {
+ always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+ } else {
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Long)(Char)i2;
+ assign(icc, s390_call_calculate_iccSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64((ULong)op2))));
+ assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+ }
+ }
+
+ return "cgij";
+}
+
+/* ------------- signed compares: halfword and high-word forms ----------- */
+
+/* CH: signed compare of r1 low word with sign-extended halfword from
+   storage. */
+HChar *
+s390_irgen_CH(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "ch";
+}
+
+/* CHY: long-displacement form of CH; identical semantics. */
+HChar *
+s390_irgen_CHY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "chy";
+}
+
+/* CGH: 64-bit signed compare with sign-extended halfword from storage. */
+HChar *
+s390_irgen_CGH(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_16Sto64, load(Ity_I16, mkexpr(op2addr))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cgh";
+}
+
+/* CHI: signed compare with sign-extended halfword immediate, 32-bit. */
+HChar *
+s390_irgen_CHI(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Int)(Short)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32((UInt)op2)));
+
+ return "chi";
+}
+
+/* CGHI: signed compare with sign-extended halfword immediate, 64-bit. */
+HChar *
+s390_irgen_CGHI(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Long)(Short)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64((ULong)op2)));
+
+ return "cghi";
+}
+
+/* CHHSI: signed compare of a halfword in storage with a halfword
+   immediate (both kept at 16 bits for the thunk). */
+HChar *
+s390_irgen_CHHSI(UShort i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ Short op2;
+
+ assign(op1, load(Ity_I16, mkexpr(op1addr)));
+ op2 = (Short)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I16, mkU16((UShort)op2)));
+
+ return "chhsi";
+}
+
+/* CHSI: signed compare of a 32-bit storage operand with a sign-extended
+   halfword immediate. */
+HChar *
+s390_irgen_CHSI(UShort i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+
+ assign(op1, load(Ity_I32, mkexpr(op1addr)));
+ op2 = (Int)(Short)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32((UInt)op2)));
+
+ return "chsi";
+}
+
+/* CGHSI: signed compare of a 64-bit storage operand with a sign-extended
+   halfword immediate. */
+HChar *
+s390_irgen_CGHSI(UShort i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Long op2;
+
+ assign(op1, load(Ity_I64, mkexpr(op1addr)));
+ op2 = (Long)(Short)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64((ULong)op2)));
+
+ return "cghsi";
+}
+
+/* CHRL: signed compare with sign-extended PC-relative halfword operand. */
+HChar *
+s390_irgen_CHRL(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "chrl";
+}
+
+/* CGHRL: 64-bit CHRL. */
+HChar *
+s390_irgen_CGHRL(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_16Sto64, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "cghrl";
+}
+
+/* CHHR: signed compare of the high words of r1 and r2. */
+HChar *
+s390_irgen_CHHR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ assign(op2, get_gpr_w0(r2));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "chhr";
+}
+
+/* CHLR: signed compare of high word of r1 with low word of r2. */
+HChar *
+s390_irgen_CHLR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ assign(op2, get_gpr_w1(r2));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "chlr";
+}
+
+/* CHF: signed compare of high word of r1 with 32-bit storage operand. */
+HChar *
+s390_irgen_CHF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+ return "chf";
+}
+
+/* CIH: signed compare of high word of r1 with 32-bit immediate. */
+HChar *
+s390_irgen_CIH(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+
+ assign(op1, get_gpr_w0(r1));
+ op2 = (Int)i2;
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32((UInt)op2)));
+
+ return "cih";
+}
+
+/* CLR: Compare Logical (32-bit register-register). Unsigned compare of
+   the low words of r1 and r2; result goes to the CC thunk only. */
+HChar *
+s390_irgen_CLR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clr";
+}
+
+/* CLGR: Compare Logical (64-bit register-register). */
+HChar *
+s390_irgen_CLGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgr";
+}
+
+/* CLGFR: Compare Logical, 64-bit r1 against zero-extended low word of r2. */
+HChar *
+s390_irgen_CLGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, get_gpr_w1(r2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgfr";
+}
+
+/* CL: Compare Logical, 32-bit register against word in storage. */
+HChar *
+s390_irgen_CL(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "cl";
+}
+
+/* CLY: long-displacement form of CL; identical semantics. */
+HChar *
+s390_irgen_CLY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "cly";
+}
+
+/* CLG: Compare Logical, 64-bit register against doubleword in storage. */
+HChar *
+s390_irgen_CLG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clg";
+}
+
+/* CLGF: Compare Logical, 64-bit register against zero-extended word
+   from storage. */
+HChar *
+s390_irgen_CLGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgf";
+}
+
+/* CLFI: Compare Logical Immediate, 32-bit register vs 32-bit immediate. */
+HChar *
+s390_irgen_CLFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32(op2)));
+
+   return "clfi";
+}
+
+/* CLGFI: Compare Logical Immediate, 64-bit register vs zero-extended
+   32-bit immediate. */
+HChar *
+s390_irgen_CLGFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (ULong)i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64(op2)));
+
+   return "clgfi";
+}
+
+/* CLI: Compare Logical Immediate, byte in storage vs 8-bit immediate.
+   Note the operand roles: op1 comes from storage, op2 is the immediate. */
+HChar *
+s390_irgen_CLI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I8, mkU8(op2)));
+
+   return "cli";
+}
+
+/* CLIY: long-displacement form of CLI; identical semantics. */
+HChar *
+s390_irgen_CLIY(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I8, mkU8(op2)));
+
+   return "cliy";
+}
+
+/* CLFHSI: Compare Logical Immediate, word in storage vs zero-extended
+   16-bit immediate. */
+HChar *
+s390_irgen_CLFHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+
+   assign(op1, load(Ity_I32, mkexpr(op1addr)));
+   op2 = (UInt)i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32(op2)));
+
+   return "clfhsi";
+}
+
+/* CLGHSI: Compare Logical Immediate, doubleword in storage vs
+   zero-extended 16-bit immediate. */
+HChar *
+s390_irgen_CLGHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+
+   assign(op1, load(Ity_I64, mkexpr(op1addr)));
+   op2 = (ULong)i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64(op2)));
+
+   return "clghsi";
+}
+
+/* CLHHSI: Compare Logical Immediate, halfword in storage vs 16-bit
+   immediate. */
+HChar *
+s390_irgen_CLHHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+
+   assign(op1, load(Ity_I16, mkexpr(op1addr)));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I16, mkU16(op2)));
+
+   return "clhhsi";
+}
+
+/* CLRL: Compare Logical Relative Long. The second operand is a word at
+   the current instruction address plus the signed halfword-scaled
+   offset i2 (hence the sign-extension and << 1). */
+HChar *
+s390_irgen_CLRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clrl";
+}
+
+/* CLGRL: 64-bit variant of CLRL; operand is a doubleword at the
+   relative address. */
+HChar *
+s390_irgen_CLGRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgrl";
+}
+
+/* CLGFRL: 64-bit register vs zero-extended word at the relative address. */
+HChar *
+s390_irgen_CLGFRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgfrl";
+}
+
+/* CLHRL: 32-bit register vs zero-extended halfword at the relative
+   address. */
+HChar *
+s390_irgen_CLHRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Uto32, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhrl";
+}
+
+/* CLGHRL: 64-bit register vs zero-extended halfword at the relative
+   address. */
+HChar *
+s390_irgen_CLGHRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_16Uto64, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clghrl";
+}
+
+/* CLRB: Compare Logical and Branch (32-bit). m3 is the branch mask:
+   0 means never branch (no-op), 14 covers CC 0/1/2 which is every CC a
+   logical compare can produce, so it branches unconditionally. Otherwise
+   icc holds the computed CC and (m3 << icc) & 8 tests the mask bit for
+   that CC. Branch target is the computed address op4addr. */
+HChar *
+s390_irgen_CLRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         assign(op2, get_gpr_w1(r2));
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+      }
+   }
+
+   return "clrb";
+}
+
+/* CLGRB: 64-bit variant of CLRB; same mask handling. */
+HChar *
+s390_irgen_CLGRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         assign(op2, get_gpr_dw0(r2));
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+      }
+   }
+
+   return "clgrb";
+}
+
+/* CLRJ: Compare Logical and Branch Relative (32-bit). Like CLRB but the
+   target is IA-relative: i4 is a signed halfword-scaled offset. */
+HChar *
+s390_irgen_CLRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         assign(op2, get_gpr_w1(r2));
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      }
+   }
+
+   return "clrj";
+}
+
+/* CLGRJ: 64-bit variant of CLRJ. */
+HChar *
+s390_irgen_CLGRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         assign(op2, get_gpr_dw0(r2));
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      }
+   }
+
+   return "clgrj";
+}
+
+/* CLIB: Compare Logical Immediate and Branch (32-bit). Same mask scheme
+   as CLRB (m3==0 never branches, m3==14 always branches); second operand
+   is the zero-extended 8-bit immediate i2. */
+HChar *
+s390_irgen_CLIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         op2 = (UInt)i2;
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32(op2))));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+      }
+   }
+
+   return "clib";
+}
+
+/* CLGIB: 64-bit variant of CLIB. */
+HChar *
+s390_irgen_CLGIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         op2 = (ULong)i2;
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64(op2))));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_not_condition_goto_computed(binop(Iop_CmpEQ32, mkexpr(cond), mkU32(0)), mkexpr(op4addr));
+      }
+   }
+
+   return "clgib";
+}
+
+/* CLIJ: Compare Logical Immediate and Branch Relative (32-bit). Target
+   is IA-relative via the signed halfword-scaled offset i4. */
+HChar *
+s390_irgen_CLIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         op2 = (UInt)i2;
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32(op2))));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      }
+   }
+
+   return "clij";
+}
+
+/* CLGIJ: 64-bit variant of CLIJ. */
+HChar *
+s390_irgen_CLGIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+   IRTemp icc = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1)));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         op2 = (ULong)i2;
+         assign(icc, s390_call_calculate_iccZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I64, mkU64(op2))));
+         assign(cond, binop(Iop_And32, binop(Iop_Shl32, mkU32(m3), unop(Iop_32to8, mkexpr(icc))), mkU32(8)));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)), guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      }
+   }
+
+   return "clgij";
+}
+
+/* CLM: Compare Logical under Mask. r3 is a 4-bit mask selecting which
+   bytes of r1's low word (b4..b7, MSB first) participate. Selected
+   register bytes are gathered into op1 and the same number of
+   consecutive storage bytes into op2; unselected byte positions are
+   zero in both operands so they compare equal. n counts consumed
+   storage bytes and serves as the offset for the next load. */
+HChar *
+s390_irgen_CLM(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp b0 = newTemp(Ity_I32);
+   IRTemp b1 = newTemp(Ity_I32);
+   IRTemp b2 = newTemp(Ity_I32);
+   IRTemp b3 = newTemp(Ity_I32);
+   IRTemp c0 = newTemp(Ity_I32);
+   IRTemp c1 = newTemp(Ity_I32);
+   IRTemp c2 = newTemp(Ity_I32);
+   IRTemp c3 = newTemp(Ity_I32);
+   UChar n;
+
+   n = 0;
+   if ((r3 & 8) != 0) {
+      assign(b0, unop(Iop_8Uto32, get_gpr_b4(r1)));
+      assign(c0, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+      n = n + 1;
+   } else {
+      assign(b0, mkU32(0));
+      assign(c0, mkU32(0));
+   }
+   if ((r3 & 4) != 0) {
+      assign(b1, unop(Iop_8Uto32, get_gpr_b5(r1)));
+      assign(c1, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b1, mkU32(0));
+      assign(c1, mkU32(0));
+   }
+   if ((r3 & 2) != 0) {
+      assign(b2, unop(Iop_8Uto32, get_gpr_b6(r1)));
+      assign(c2, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b2, mkU32(0));
+      assign(c2, mkU32(0));
+   }
+   if ((r3 & 1) != 0) {
+      assign(b3, unop(Iop_8Uto32, get_gpr_b7(r1)));
+      assign(c3, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b3, mkU32(0));
+      assign(c3, mkU32(0));
+   }
+   /* Reassemble the selected bytes into two comparable words. */
+   assign(op1, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(b0), mkU8(24)), binop(Iop_Shl32, mkexpr(b1), mkU8(16))), binop(Iop_Shl32, mkexpr(b2), mkU8(8))), mkexpr(b3)));
+   assign(op2, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(c0), mkU8(24)), binop(Iop_Shl32, mkexpr(c1), mkU8(16))), binop(Iop_Shl32, mkexpr(c2), mkU8(8))), mkexpr(c3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clm";
+}
+
+/* CLMY: long-displacement form of CLM; identical semantics. */
+HChar *
+s390_irgen_CLMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp b0 = newTemp(Ity_I32);
+   IRTemp b1 = newTemp(Ity_I32);
+   IRTemp b2 = newTemp(Ity_I32);
+   IRTemp b3 = newTemp(Ity_I32);
+   IRTemp c0 = newTemp(Ity_I32);
+   IRTemp c1 = newTemp(Ity_I32);
+   IRTemp c2 = newTemp(Ity_I32);
+   IRTemp c3 = newTemp(Ity_I32);
+   UChar n;
+
+   n = 0;
+   if ((r3 & 8) != 0) {
+      assign(b0, unop(Iop_8Uto32, get_gpr_b4(r1)));
+      assign(c0, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+      n = n + 1;
+   } else {
+      assign(b0, mkU32(0));
+      assign(c0, mkU32(0));
+   }
+   if ((r3 & 4) != 0) {
+      assign(b1, unop(Iop_8Uto32, get_gpr_b5(r1)));
+      assign(c1, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b1, mkU32(0));
+      assign(c1, mkU32(0));
+   }
+   if ((r3 & 2) != 0) {
+      assign(b2, unop(Iop_8Uto32, get_gpr_b6(r1)));
+      assign(c2, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b2, mkU32(0));
+      assign(c2, mkU32(0));
+   }
+   if ((r3 & 1) != 0) {
+      assign(b3, unop(Iop_8Uto32, get_gpr_b7(r1)));
+      assign(c3, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b3, mkU32(0));
+      assign(c3, mkU32(0));
+   }
+   assign(op1, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(b0), mkU8(24)), binop(Iop_Shl32, mkexpr(b1), mkU8(16))), binop(Iop_Shl32, mkexpr(b2), mkU8(8))), mkexpr(b3)));
+   assign(op2, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(c0), mkU8(24)), binop(Iop_Shl32, mkexpr(c1), mkU8(16))), binop(Iop_Shl32, mkexpr(c2), mkU8(8))), mkexpr(c3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clmy";
+}
+
+/* CLMH: Compare Logical under Mask High. Like CLM but the mask selects
+   bytes of the HIGH word of r1 (b0..b3). */
+HChar *
+s390_irgen_CLMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp b0 = newTemp(Ity_I32);
+   IRTemp b1 = newTemp(Ity_I32);
+   IRTemp b2 = newTemp(Ity_I32);
+   IRTemp b3 = newTemp(Ity_I32);
+   IRTemp c0 = newTemp(Ity_I32);
+   IRTemp c1 = newTemp(Ity_I32);
+   IRTemp c2 = newTemp(Ity_I32);
+   IRTemp c3 = newTemp(Ity_I32);
+   UChar n;
+
+   n = 0;
+   if ((r3 & 8) != 0) {
+      assign(b0, unop(Iop_8Uto32, get_gpr_b0(r1)));
+      assign(c0, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+      n = n + 1;
+   } else {
+      assign(b0, mkU32(0));
+      assign(c0, mkU32(0));
+   }
+   if ((r3 & 4) != 0) {
+      assign(b1, unop(Iop_8Uto32, get_gpr_b1(r1)));
+      assign(c1, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b1, mkU32(0));
+      assign(c1, mkU32(0));
+   }
+   if ((r3 & 2) != 0) {
+      assign(b2, unop(Iop_8Uto32, get_gpr_b2(r1)));
+      assign(c2, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b2, mkU32(0));
+      assign(c2, mkU32(0));
+   }
+   if ((r3 & 1) != 0) {
+      assign(b3, unop(Iop_8Uto32, get_gpr_b3(r1)));
+      assign(c3, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b3, mkU32(0));
+      assign(c3, mkU32(0));
+   }
+   assign(op1, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(b0), mkU8(24)), binop(Iop_Shl32, mkexpr(b1), mkU8(16))), binop(Iop_Shl32, mkexpr(b2), mkU8(8))), mkexpr(b3)));
+   assign(op2, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(c0), mkU8(24)), binop(Iop_Shl32, mkexpr(c1), mkU8(16))), binop(Iop_Shl32, mkexpr(c2), mkU8(8))), mkexpr(c3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clmh";
+}
+
+/* CLHHR: Compare Logical High-High. Unsigned compare of the high words
+   of r1 and r2. */
+HChar *
+s390_irgen_CLHHR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, get_gpr_w0(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhhr";
+}
+
+/* CLHLR: Compare Logical High-Low. High word of r1 vs low word of r2. */
+HChar *
+s390_irgen_CLHLR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, get_gpr_w1(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhlr";
+}
+
+/* CLHF: Compare Logical High. High word of r1 vs word in storage. */
+HChar *
+s390_irgen_CLHF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhf";
+}
+
+/* CLIH: Compare Logical Immediate High. High word of r1 vs 32-bit
+   immediate. */
+HChar *
+s390_irgen_CLIH(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32, mkU32(op2)));
+
+   return "clih";
+}
+
+/* CPYA: Copy Access register. Copies access register r2 into access
+   register r1. Disassembly is emitted here directly (when front-end
+   tracing is on) rather than via the common path. */
+HChar *
+s390_irgen_CPYA(UChar r1, UChar r2)
+{
+   put_ar_w0(r1, get_ar_w0(r2));
+   if (unlikely(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, AR, AR), "cpya", r1, r2);
+
+   return "cpya";
+}
+
+/* XR: Exclusive-OR (32-bit register-register). The r1 == r2 case is
+   special-cased to a constant 0 so the op temps are never used
+   unassigned; it is also the idiomatic register-clear. CC is set from
+   the result via the BITWISE thunk. */
+HChar *
+s390_irgen_XR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   if (r1 == r2) {
+      assign(result, mkU32(0));
+   } else {
+      assign(op1, get_gpr_w1(r1));
+      assign(op2, get_gpr_w1(r2));
+      assign(result, binop(Iop_Xor32, mkexpr(op1), mkexpr(op2)));
+   }
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xr";
+}
+
+/* XGR: 64-bit variant of XR; same r1 == r2 shortcut. */
+HChar *
+s390_irgen_XGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   if (r1 == r2) {
+      assign(result, mkU64(0));
+   } else {
+      assign(op1, get_gpr_dw0(r1));
+      assign(op2, get_gpr_dw0(r2));
+      assign(result, binop(Iop_Xor64, mkexpr(op1), mkexpr(op2)));
+   }
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "xgr";
+}
+
+/* XRK: three-operand XOR (32-bit): r1 = r2 ^ r3. */
+HChar *
+s390_irgen_XRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Xor32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xrk";
+}
+
+/* XGRK: three-operand XOR (64-bit): r1 = r2 ^ r3. */
+HChar *
+s390_irgen_XGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Xor64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "xgrk";
+}
+
+/* X: XOR 32-bit register with word in storage. */
+HChar *
+s390_irgen_X(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "x";
+}
+
+/* XY: long-displacement form of X; identical semantics. */
+HChar *
+s390_irgen_XY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xy";
+}
+
+/* XG: XOR 64-bit register with doubleword in storage. */
+HChar *
+s390_irgen_XG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_Xor64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "xg";
+}
+
+/* XI: XOR byte in storage with 8-bit immediate; result written back to
+   the same storage location. */
+HChar *
+s390_irgen_XI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_Xor8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "xi";
+}
+
+/* XIY: long-displacement form of XI; identical semantics. */
+HChar *
+s390_irgen_XIY(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_Xor8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "xiy";
+}
+
+/* XIHF: XOR the high word of r1 with a 32-bit immediate. */
+HChar *
+s390_irgen_XIHF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "xihf";
+}
+
+/* XILF: XOR the low word of r1 with a 32-bit immediate. */
+HChar *
+s390_irgen_XILF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xilf";
+}
+
+/* EAR: Extract Access register. Copies access register r2 into the low
+   word of GPR r1. Disassembly is emitted directly when FE tracing is on. */
+HChar *
+s390_irgen_EAR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, get_ar_w0(r2));
+   if (unlikely(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, AR), "ear", r1, r2);
+
+   return "ear";
+}
+
+/* IC: Insert Character. Loads one byte from storage into byte 7 (the
+   least significant byte) of r1; other bytes are unchanged. */
+HChar *
+s390_irgen_IC(UChar r1, IRTemp op2addr)
+{
+   put_gpr_b7(r1, load(Ity_I8, mkexpr(op2addr)));
+
+   return "ic";
+}
+
+/* ICY: long-displacement form of IC; identical semantics. */
+HChar *
+s390_irgen_ICY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_b7(r1, load(Ity_I8, mkexpr(op2addr)));
+
+   return "icy";
+}
+
+/* ICM: Insert Characters under Mask. Consecutive storage bytes are
+   inserted into the bytes of r1's low word selected by the 4-bit mask
+   r3 (MSB first); n tracks how many storage bytes were consumed. The
+   CC thunk sees the resulting low word plus the mask so the helper can
+   compute the ICM condition code. */
+HChar *
+s390_irgen_ICM(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar n;
+   IRTemp result = newTemp(Ity_I32);
+   UInt mask;
+
+   n = 0;
+   mask = (UInt)r3;
+   if ((mask & 8) != 0) {
+      put_gpr_b4(r1, load(Ity_I8, mkexpr(op2addr)));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      put_gpr_b5(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      put_gpr_b6(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      put_gpr_b7(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   assign(result, get_gpr_w1(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_INSERT_CHAR_MASK_32, result, mktemp(Ity_I32, mkU32(mask)));
+
+   return "icm";
+}
+
+/* ICMY: long-displacement form of ICM; identical semantics. */
+HChar *
+s390_irgen_ICMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar n;
+   IRTemp result = newTemp(Ity_I32);
+   UInt mask;
+
+   n = 0;
+   mask = (UInt)r3;
+   if ((mask & 8) != 0) {
+      put_gpr_b4(r1, load(Ity_I8, mkexpr(op2addr)));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      put_gpr_b5(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      put_gpr_b6(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      put_gpr_b7(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   assign(result, get_gpr_w1(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_INSERT_CHAR_MASK_32, result, mktemp(Ity_I32, mkU32(mask)));
+
+   return "icmy";
+}
+
+/* ICMH: Insert Characters under Mask High. Like ICM but targets bytes
+   of r1's HIGH word (b0..b3). */
+HChar *
+s390_irgen_ICMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar n;
+   IRTemp result = newTemp(Ity_I32);
+   UInt mask;
+
+   n = 0;
+   mask = (UInt)r3;
+   if ((mask & 8) != 0) {
+      put_gpr_b0(r1, load(Ity_I8, mkexpr(op2addr)));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      put_gpr_b1(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      put_gpr_b2(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      put_gpr_b3(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+      n = n + 1;
+   }
+   assign(result, get_gpr_w0(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_INSERT_CHAR_MASK_32, result, mktemp(Ity_I32, mkU32(mask)));
+
+   return "icmh";
+}
+
+/* IIHF: Insert Immediate, high word of r1 := 32-bit immediate. */
+HChar *
+s390_irgen_IIHF(UChar r1, UInt i2)
+{
+   put_gpr_w0(r1, mkU32(i2));
+
+   return "iihf";
+}
+
+/* IIHH: Insert Immediate into halfword 0 (bits 0-15) of r1. */
+HChar *
+s390_irgen_IIHH(UChar r1, UShort i2)
+{
+   put_gpr_hw0(r1, mkU16(i2));
+
+   return "iihh";
+}
+
+/* IIHL: Insert Immediate into halfword 1 (bits 16-31) of r1. */
+HChar *
+s390_irgen_IIHL(UChar r1, UShort i2)
+{
+   put_gpr_hw1(r1, mkU16(i2));
+
+   return "iihl";
+}
+
+/* IILF: Insert Immediate, low word of r1 := 32-bit immediate. */
+HChar *
+s390_irgen_IILF(UChar r1, UInt i2)
+{
+   put_gpr_w1(r1, mkU32(i2));
+
+   return "iilf";
+}
+
+/* IILH: Insert Immediate into halfword 2 (bits 32-47) of r1. */
+HChar *
+s390_irgen_IILH(UChar r1, UShort i2)
+{
+   put_gpr_hw2(r1, mkU16(i2));
+
+   return "iilh";
+}
+
+/* IILL: Insert Immediate into halfword 3 (bits 48-63) of r1. */
+HChar *
+s390_irgen_IILL(UChar r1, UShort i2)
+{
+   put_gpr_hw3(r1, mkU16(i2));
+
+   return "iill";
+}
+
+/* LR: Load (32-bit register-register). Low word of r1 := low word of r2. */
+HChar *
+s390_irgen_LR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, get_gpr_w1(r2));
+
+   return "lr";
+}
+
+/* LGR: Load (64-bit register-register). */
+HChar *
+s390_irgen_LGR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, get_gpr_dw0(r2));
+
+   return "lgr";
+}
+
+/* LGFR: Load, sign-extending the low word of r2 into r1. */
+HChar *
+s390_irgen_LGFR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_32Sto64, get_gpr_w1(r2)));
+
+   return "lgfr";
+}
+
+/* L: Load word from storage into the low word of r1. */
+HChar *
+s390_irgen_L(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, load(Ity_I32, mkexpr(op2addr)));
+
+   return "l";
+}
+
+/* LY: long-displacement form of L; identical semantics. */
+HChar *
+s390_irgen_LY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, load(Ity_I32, mkexpr(op2addr)));
+
+   return "ly";
+}
+
+/* LG: Load doubleword from storage into r1. */
+HChar *
+s390_irgen_LG(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, load(Ity_I64, mkexpr(op2addr)));
+
+   return "lg";
+}
+
+/* LGF: Load word from storage, sign-extended to 64 bits. */
+HChar *
+s390_irgen_LGF(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+
+   return "lgf";
+}
+
+/* LGFI: Load a sign-extended 32-bit immediate into r1. */
+HChar *
+s390_irgen_LGFI(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, mkU64((ULong)(Long)(Int)i2));
+
+   return "lgfi";
+}
+
+/* LRL: Load Relative Long; word at IA + signed halfword-scaled offset. */
+HChar *
+s390_irgen_LRL(UChar r1, UInt i2)
+{
+   put_gpr_w1(r1, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1))));
+
+   return "lrl";
+}
+
+/* LGRL: 64-bit Load Relative Long. */
+HChar *
+s390_irgen_LGRL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1))));
+
+   return "lgrl";
+}
+
+/* LGFRL: Load Relative Long, word sign-extended to 64 bits. */
+HChar *
+s390_irgen_LGFRL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, unop(Iop_32Sto64, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+
+   return "lgfrl";
+}
+
+/* LA: Load Address; r1 := the computed effective address (no memory
+   access). */
+HChar *
+s390_irgen_LA(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "la";
+}
+
+/* LAY: long-displacement form of LA. */
+HChar *
+s390_irgen_LAY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "lay";
+}
+
+/* LAE: Load Address Extended. Modeled identically to LA here; access
+   register side effects are not represented. */
+HChar *
+s390_irgen_LAE(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "lae";
+}
+
+/* LAEY: long-displacement form of LAE. */
+HChar *
+s390_irgen_LAEY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "laey";
+}
+
+/* LARL: Load Address Relative Long; r1 := IA + signed halfword-scaled
+   offset. */
+HChar *
+s390_irgen_LARL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)));
+
+   return "larl";
+}
+
+/* LAA: Load And Add (32-bit). Adds r3 to the word at op2addr, stores
+   the sum back, and loads the ORIGINAL storage value into r1. The CC
+   thunk captures the pre-store operands, so statement order matters.
+   NOTE(review): the load/compute/store sequence is not emitted as an
+   atomic operation here — confirm this is acceptable for the
+   translator's execution model. */
+HChar *
+s390_irgen_LAA(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_w1(r1, mkexpr(op2));
+
+   return "laa";
+}
+
+/* LAAG: 64-bit variant of LAA. */
+HChar *
+s390_irgen_LAAG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Add64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op2, op3);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_dw0(r1, mkexpr(op2));
+
+   return "laag";
+}
+
+/* LAAL: Load And Add Logical (32-bit); unsigned-add CC semantics. */
+HChar *
+s390_irgen_LAAL(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_w1(r1, mkexpr(op2));
+
+   return "laal";
+}
+
+/* LAALG: 64-bit variant of LAAL. */
+HChar *
+s390_irgen_LAALG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Add64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op2, op3);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_dw0(r1, mkexpr(op2));
+
+   return "laalg";
+}
+
+/* LAN: Load And AND (32-bit); same old-value-to-r1 pattern, CC from the
+   bitwise result. */
+HChar *
+s390_irgen_LAN(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_And32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_w1(r1, mkexpr(op2));
+
+   return "lan";
+}
+
+/* LANG: 64-bit variant of LAN. */
+HChar *
+s390_irgen_LANG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_And64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_dw0(r1, mkexpr(op2));
+
+   return "lang";
+}
+
+/* LAX: Load And XOR (32-bit). */
+HChar *
+s390_irgen_LAX(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Xor32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_w1(r1, mkexpr(op2));
+
+   return "lax";
+}
+
+/* LAXG: 64-bit variant of LAX. */
+HChar *
+s390_irgen_LAXG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Xor64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_dw0(r1, mkexpr(op2));
+
+   return "laxg";
+}
+
+/* LAO: Load And OR (32-bit). */
+HChar *
+s390_irgen_LAO(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Or32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_w1(r1, mkexpr(op2));
+
+   return "lao";
+}
+
+/* LAOG: 64-bit variant of LAO. */
+HChar *
+s390_irgen_LAOG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Or64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op2addr), mkexpr(result));
+   put_gpr_dw0(r1, mkexpr(op2));
+
+   return "laog";
+}
+
+/* LTR: Load and Test (32-bit). Copies r2's low word into r1 and sets
+   the CC from the loaded value via the LOAD_AND_TEST thunk. */
+HChar *
+s390_irgen_LTR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   put_gpr_w1(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltr";
+}
+
+/* LTGR: 64-bit variant of LTR. */
+HChar *
+s390_irgen_LTGR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltgr";
+}
+
+/* LTGFR: Load and Test, sign-extending r2's low word to 64 bits. */
+HChar *
+s390_irgen_LTGFR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltgfr";
+}
+
+/* LT: Load and Test, word from storage. */
+HChar *
+s390_irgen_LT(UChar r1, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   put_gpr_w1(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "lt";
+}
+
+/* LTG: Load and Test, doubleword from storage. */
+HChar *
+s390_irgen_LTG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltg";
+}
+
+/* LTGF: Load and Test, word from storage sign-extended to 64 bits. */
+HChar *
+s390_irgen_LTGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltgf";
+}
+
+/* LBR -- sign-extend the low byte of r2 into the low word of r1.  CC is
+   not changed by these byte loads.  */
+HChar *
+s390_irgen_LBR(UChar r1, UChar r2)
+{
+ put_gpr_w1(r1, unop(Iop_8Sto32, get_gpr_b7(r2)));
+
+ return "lbr";
+}
+
+/* LGBR -- sign-extend the low byte of r2 into all 64 bits of r1.  */
+HChar *
+s390_irgen_LGBR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, unop(Iop_8Sto64, get_gpr_b7(r2)));
+
+ return "lgbr";
+}
+
+/* LB -- load byte from memory, sign-extended to 32 bits.  */
+HChar *
+s390_irgen_LB(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, unop(Iop_8Sto32, load(Ity_I8, mkexpr(op2addr))));
+
+ return "lb";
+}
+
+/* LGB -- load byte from memory, sign-extended to 64 bits.  */
+HChar *
+s390_irgen_LGB(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, unop(Iop_8Sto64, load(Ity_I8, mkexpr(op2addr))));
+
+ return "lgb";
+}
+
+/* LBH -- load byte from memory, sign-extended into the HIGH word of r1.  */
+HChar *
+s390_irgen_LBH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w0(r1, unop(Iop_8Sto32, load(Ity_I8, mkexpr(op2addr))));
+
+ return "lbh";
+}
+
+/* LCR -- load complement: r1 = 0 - r2 (32-bit).  CC via the signed-subtract
+   thunk with a constant-0 first operand; mktemp wraps the constant so it can
+   be passed as an IRTemp.  */
+HChar *
+s390_irgen_LCR(UChar r1, UChar r2)
+{
+ Int op1;
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ op1 = 0;
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_Sub32, mkU32((UInt)op1), mkexpr(op2)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, mktemp(Ity_I32, mkU32((UInt)op1)), op2);
+
+ return "lcr";
+}
+
+/* LCGR -- load complement: r1 = 0 - r2 (64-bit).  */
+HChar *
+s390_irgen_LCGR(UChar r1, UChar r2)
+{
+ Long op1;
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ op1 = 0ULL;
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_Sub64, mkU64((ULong)op1), mkexpr(op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, mktemp(Ity_I64, mkU64((ULong)op1)), op2);
+
+ return "lcgr";
+}
+
+/* LCGFR -- load complement of 32-bit r2 sign-extended: r1 = 0 - (Long)r2.  */
+HChar *
+s390_irgen_LCGFR(UChar r1, UChar r2)
+{
+ Long op1;
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ op1 = 0ULL;
+ assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+ assign(result, binop(Iop_Sub64, mkU64((ULong)op1), mkexpr(op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, mktemp(Ity_I64, mkU64((ULong)op1)), op2);
+
+ return "lcgfr";
+}
+
+/* LHR -- sign-extend the low halfword of r2 into the low word of r1.  */
+HChar *
+s390_irgen_LHR(UChar r1, UChar r2)
+{
+ put_gpr_w1(r1, unop(Iop_16Sto32, get_gpr_hw3(r2)));
+
+ return "lhr";
+}
+
+/* LGHR -- sign-extend the low halfword of r2 into all 64 bits of r1.  */
+HChar *
+s390_irgen_LGHR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, unop(Iop_16Sto64, get_gpr_hw3(r2)));
+
+ return "lghr";
+}
+
+/* LH -- load halfword from memory, sign-extended to 32 bits.  */
+HChar *
+s390_irgen_LH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+
+ return "lh";
+}
+
+/* LHY -- long-displacement form of LH; identical semantics.  */
+HChar *
+s390_irgen_LHY(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+
+ return "lhy";
+}
+
+/* LGH -- load halfword from memory, sign-extended to 64 bits.  */
+HChar *
+s390_irgen_LGH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, unop(Iop_16Sto64, load(Ity_I16, mkexpr(op2addr))));
+
+ return "lgh";
+}
+
+/* LHI -- load a sign-extended 16-bit immediate into the low word of r1.  */
+HChar *
+s390_irgen_LHI(UChar r1, UShort i2)
+{
+ Int value = (Short)i2;   /* sign-extend the immediate */
+
+ put_gpr_w1(r1, mkU32((UInt)value));
+
+ return "lhi";
+}
+
+/* LGHI -- load a sign-extended 16-bit immediate into all 64 bits of r1.  */
+HChar *
+s390_irgen_LGHI(UChar r1, UShort i2)
+{
+ Long value = (Short)i2;   /* sign-extend the immediate */
+
+ put_gpr_dw0(r1, mkU64((ULong)value));
+
+ return "lghi";
+}
+
+/* LHRL -- load halfword relative long: the operand lives at
+   current-instruction-address + 2 * (signed) i2; sign-extended to 32 bits.  */
+HChar *
+s390_irgen_LHRL(UChar r1, UInt i2)
+{
+ put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+
+ return "lhrl";
+}
+
+/* LGHRL -- as LHRL but sign-extended to 64 bits.  */
+HChar *
+s390_irgen_LGHRL(UChar r1, UInt i2)
+{
+ put_gpr_dw0(r1, unop(Iop_16Sto64, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+
+ return "lghrl";
+}
+
+/* LHH -- load halfword from memory, sign-extended into the HIGH word.  */
+HChar *
+s390_irgen_LHH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w0(r1, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+
+ return "lhh";
+}
+
+/* LFH -- load a full 32-bit word from memory into the HIGH word of r1.  */
+HChar *
+s390_irgen_LFH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w0(r1, load(Ity_I32, mkexpr(op2addr)));
+
+ return "lfh";
+}
+
+/* LLGFR -- zero-extend 32-bit r2 into 64-bit r1.  None of the load-logical
+   instructions below change the condition code.  */
+HChar *
+s390_irgen_LLGFR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, unop(Iop_32Uto64, get_gpr_w1(r2)));
+
+ return "llgfr";
+}
+
+/* LLGF -- load 32-bit word from memory, zero-extended to 64 bits.  */
+HChar *
+s390_irgen_LLGF(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+
+ return "llgf";
+}
+
+/* LLGFRL -- relative-long form: operand at IA + 2 * (signed) i2.  */
+HChar *
+s390_irgen_LLGFRL(UChar r1, UInt i2)
+{
+ put_gpr_dw0(r1, unop(Iop_32Uto64, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+
+ return "llgfrl";
+}
+
+/* LLCR -- zero-extend the low byte of r2 into the low word of r1.  */
+HChar *
+s390_irgen_LLCR(UChar r1, UChar r2)
+{
+ put_gpr_w1(r1, unop(Iop_8Uto32, get_gpr_b7(r2)));
+
+ return "llcr";
+}
+
+/* LLGCR -- zero-extend the low byte of r2 into all of r1.  */
+HChar *
+s390_irgen_LLGCR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, unop(Iop_8Uto64, get_gpr_b7(r2)));
+
+ return "llgcr";
+}
+
+/* LLC -- load byte from memory, zero-extended to 32 bits.  */
+HChar *
+s390_irgen_LLC(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+
+ return "llc";
+}
+
+/* LLGC -- load byte from memory, zero-extended to 64 bits.  */
+HChar *
+s390_irgen_LLGC(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, unop(Iop_8Uto64, load(Ity_I8, mkexpr(op2addr))));
+
+ return "llgc";
+}
+
+/* LLCH -- load byte from memory, zero-extended into the HIGH word.  */
+HChar *
+s390_irgen_LLCH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w0(r1, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+
+ return "llch";
+}
+
+/* LLHR -- zero-extend the low halfword of r2 into the low word of r1.  */
+HChar *
+s390_irgen_LLHR(UChar r1, UChar r2)
+{
+ put_gpr_w1(r1, unop(Iop_16Uto32, get_gpr_hw3(r2)));
+
+ return "llhr";
+}
+
+/* LLGHR -- zero-extend the low halfword of r2 into all of r1.  */
+HChar *
+s390_irgen_LLGHR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, unop(Iop_16Uto64, get_gpr_hw3(r2)));
+
+ return "llghr";
+}
+
+/* LLH -- load halfword from memory, zero-extended to 32 bits.  */
+HChar *
+s390_irgen_LLH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, unop(Iop_16Uto32, load(Ity_I16, mkexpr(op2addr))));
+
+ return "llh";
+}
+
+/* LLGH -- load halfword from memory, zero-extended to 64 bits.  */
+HChar *
+s390_irgen_LLGH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, unop(Iop_16Uto64, load(Ity_I16, mkexpr(op2addr))));
+
+ return "llgh";
+}
+
+/* LLHRL -- relative-long halfword load, zero-extended to 32 bits.  */
+HChar *
+s390_irgen_LLHRL(UChar r1, UInt i2)
+{
+ put_gpr_w1(r1, unop(Iop_16Uto32, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+
+ return "llhrl";
+}
+
+/* LLGHRL -- relative-long halfword load, zero-extended to 64 bits.  */
+HChar *
+s390_irgen_LLGHRL(UChar r1, UInt i2)
+{
+ put_gpr_dw0(r1, unop(Iop_16Uto64, load(Ity_I16, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)))));
+
+ return "llghrl";
+}
+
+/* LLHH -- load halfword from memory, zero-extended into the HIGH word.  */
+HChar *
+s390_irgen_LLHH(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w0(r1, unop(Iop_16Uto32, load(Ity_I16, mkexpr(op2addr))));
+
+ return "llhh";
+}
+
+/* LLIHF -- r1 = i2 placed in bits 0-31 (high word); rest zeroed.  */
+HChar *
+s390_irgen_LLIHF(UChar r1, UInt i2)
+{
+ put_gpr_dw0(r1, mkU64(((ULong)i2) << 32));
+
+ return "llihf";
+}
+
+/* LLIHH -- i2 placed in bits 0-15 (high halfword of high word).  */
+HChar *
+s390_irgen_LLIHH(UChar r1, UShort i2)
+{
+ put_gpr_dw0(r1, mkU64(((ULong)i2) << 48));
+
+ return "llihh";
+}
+
+/* LLIHL -- i2 placed in bits 16-31 (low halfword of high word).  */
+HChar *
+s390_irgen_LLIHL(UChar r1, UShort i2)
+{
+ put_gpr_dw0(r1, mkU64(((ULong)i2) << 32));
+
+ return "llihl";
+}
+
+/* LLILF -- i2 placed in bits 32-63 (low word); rest zeroed.  */
+HChar *
+s390_irgen_LLILF(UChar r1, UInt i2)
+{
+ put_gpr_dw0(r1, mkU64(i2));
+
+ return "llilf";
+}
+
+/* LLILH -- i2 placed in bits 32-47 (high halfword of low word).  */
+HChar *
+s390_irgen_LLILH(UChar r1, UShort i2)
+{
+ put_gpr_dw0(r1, mkU64(((ULong)i2) << 16));
+
+ return "llilh";
+}
+
+/* LLILL -- i2 placed in bits 48-63 (low halfword of low word).  */
+HChar *
+s390_irgen_LLILL(UChar r1, UShort i2)
+{
+ put_gpr_dw0(r1, mkU64(i2));
+
+ return "llill";
+}
+
+/* LLGTR -- load the 31-bit value of r2 (top bit masked off, 0x7fffffff)
+   zero-extended into r1.  */
+HChar *
+s390_irgen_LLGTR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, unop(Iop_32Uto64, binop(Iop_And32, get_gpr_w1(r2), mkU32(2147483647))));
+
+ return "llgtr";
+}
+
+/* LLGT -- as LLGTR but the 31-bit value comes from memory.  */
+HChar *
+s390_irgen_LLGT(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, unop(Iop_32Uto64, binop(Iop_And32, load(Ity_I32, mkexpr(op2addr)), mkU32(2147483647))));
+
+ return "llgt";
+}
+
+/* LNR -- load negative: r1 = op2 if op2 <= 0, else -op2; the result is
+   always <= 0.  CC via the BITWISE thunk (0 iff result is zero).  */
+HChar *
+s390_irgen_LNR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(result, mkite(binop(Iop_CmpLE32S, mkexpr(op2), mkU32(0)), mkexpr(op2), binop(Iop_Sub32, mkU32(0), mkexpr(op2))));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_BITWISE, result);
+
+ return "lnr";
+}
+
+/* LNGR -- 64-bit load negative.  */
+HChar *
+s390_irgen_LNGR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, mkite(binop(Iop_CmpLE64S, mkexpr(op2), mkU64(0)), mkexpr(op2), binop(Iop_Sub64, mkU64(0), mkexpr(op2))));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_BITWISE, result);
+
+ return "lngr";
+}
+
+/* LNGFR -- load negative of the 32-bit SECOND operand (r2), sign-extended
+   to 64 bits: r1 = -(|(Long)(Int)r2|).  CC via the BITWISE thunk.
+   Fix: the source is r2 per the instruction definition (cf. LCGFR/LPGFR
+   above/below); the previous code read r1 and marked r2 unused, producing
+   a wrong result whenever r1 != r2.  */
+HChar *
+s390_irgen_LNGFR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+ assign(result, mkite(binop(Iop_CmpLE64S, mkexpr(op2), mkU64(0)), mkexpr(op2), binop(Iop_Sub64, mkU64(0), mkexpr(op2))));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_BITWISE, result);
+
+ return "lngfr";
+}
+
+/* LPQ -- load pair from quadword: the 16 bytes at op2addr go into the
+   even/odd register pair r1, r1+1.
+   NOTE(review): the instruction specifies a quadword-concurrent (atomic)
+   fetch; these two independent 8-byte loads are not atomic -- confirm
+   acceptable for the simulated memory model.  */
+HChar *
+s390_irgen_LPQ(UChar r1, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, load(Ity_I64, mkexpr(op2addr)));
+ put_gpr_dw0(r1 + 1, load(Ity_I64, binop(Iop_Add64, mkexpr(op2addr), mkU64(8))));
+
+ return "lpq";
+}
+
+/* LPR -- load positive: r1 = |r2| (32-bit).  CC via the dedicated
+   LOAD_POSITIVE thunk, which is fed the ORIGINAL operand.  */
+HChar *
+s390_irgen_LPR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(result, mkite(binop(Iop_CmpLT32S, mkexpr(op2), mkU32(0)), binop(Iop_Sub32, mkU32(0), mkexpr(op2)), mkexpr(op2)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_POSITIVE_32, op2);
+
+ return "lpr";
+}
+
+/* LPGR -- 64-bit load positive.  */
+HChar *
+s390_irgen_LPGR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, mkite(binop(Iop_CmpLT64S, mkexpr(op2), mkU64(0)), binop(Iop_Sub64, mkU64(0), mkexpr(op2)), mkexpr(op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_POSITIVE_64, op2);
+
+ return "lpgr";
+}
+
+/* LPGFR -- load positive of 32-bit r2 sign-extended to 64 bits.  */
+HChar *
+s390_irgen_LPGFR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+ assign(result, mkite(binop(Iop_CmpLT64S, mkexpr(op2), mkU64(0)), binop(Iop_Sub64, mkU64(0), mkexpr(op2)), mkexpr(op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_POSITIVE_64, op2);
+
+ return "lpgfr";
+}
+
+/* LRVR -- byte-reverse the low word of r2 into the low word of r1.  The
+   high word of r1 is untouched.  All sources are read into temps first so
+   r1 == r2 works.  */
+HChar *
+s390_irgen_LRVR(UChar r1, UChar r2)
+{
+ IRTemp b0 = newTemp(Ity_I8);
+ IRTemp b1 = newTemp(Ity_I8);
+ IRTemp b2 = newTemp(Ity_I8);
+ IRTemp b3 = newTemp(Ity_I8);
+
+ assign(b3, get_gpr_b7(r2));
+ assign(b2, get_gpr_b6(r2));
+ assign(b1, get_gpr_b5(r2));
+ assign(b0, get_gpr_b4(r2));
+ put_gpr_b4(r1, mkexpr(b3));
+ put_gpr_b5(r1, mkexpr(b2));
+ put_gpr_b6(r1, mkexpr(b1));
+ put_gpr_b7(r1, mkexpr(b0));
+
+ return "lrvr";
+}
+
+/* LRVGR -- byte-reverse all 8 bytes of r2 into r1.  */
+HChar *
+s390_irgen_LRVGR(UChar r1, UChar r2)
+{
+ IRTemp b0 = newTemp(Ity_I8);
+ IRTemp b1 = newTemp(Ity_I8);
+ IRTemp b2 = newTemp(Ity_I8);
+ IRTemp b3 = newTemp(Ity_I8);
+ IRTemp b4 = newTemp(Ity_I8);
+ IRTemp b5 = newTemp(Ity_I8);
+ IRTemp b6 = newTemp(Ity_I8);
+ IRTemp b7 = newTemp(Ity_I8);
+
+ assign(b7, get_gpr_b7(r2));
+ assign(b6, get_gpr_b6(r2));
+ assign(b5, get_gpr_b5(r2));
+ assign(b4, get_gpr_b4(r2));
+ assign(b3, get_gpr_b3(r2));
+ assign(b2, get_gpr_b2(r2));
+ assign(b1, get_gpr_b1(r2));
+ assign(b0, get_gpr_b0(r2));
+ put_gpr_b0(r1, mkexpr(b7));
+ put_gpr_b1(r1, mkexpr(b6));
+ put_gpr_b2(r1, mkexpr(b5));
+ put_gpr_b3(r1, mkexpr(b4));
+ put_gpr_b4(r1, mkexpr(b3));
+ put_gpr_b5(r1, mkexpr(b2));
+ put_gpr_b6(r1, mkexpr(b1));
+ put_gpr_b7(r1, mkexpr(b0));
+
+ return "lrvgr";
+}
+
+/* LRVH -- load a halfword from memory byte-reversed into bytes 6-7 of r1:
+   the low byte of the loaded value lands in b6, the high byte in b7.  */
+HChar *
+s390_irgen_LRVH(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I16);
+
+ assign(op2, load(Ity_I16, mkexpr(op2addr)));
+ put_gpr_b6(r1, unop(Iop_16to8, mkexpr(op2)));
+ put_gpr_b7(r1, unop(Iop_16HIto8, mkexpr(op2)));
+
+ return "lrvh";
+}
+
+/* LRV -- load a word from memory byte-reversed into the low word of r1.  */
+HChar *
+s390_irgen_LRV(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ put_gpr_b4(r1, unop(Iop_32to8, binop(Iop_And32, mkexpr(op2), mkU32(255))));
+ put_gpr_b5(r1, unop(Iop_32to8, binop(Iop_And32, binop(Iop_Shr32, mkexpr(op2), mkU8(8)), mkU32(255))));
+ put_gpr_b6(r1, unop(Iop_32to8, binop(Iop_And32, binop(Iop_Shr32, mkexpr(op2), mkU8(16)), mkU32(255))));
+ put_gpr_b7(r1, unop(Iop_32to8, binop(Iop_And32, binop(Iop_Shr32, mkexpr(op2), mkU8(24)), mkU32(255))));
+
+ return "lrv";
+}
+
+/* LRVG -- load a doubleword from memory byte-reversed into r1.  */
+HChar *
+s390_irgen_LRVG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ put_gpr_b0(r1, unop(Iop_64to8, binop(Iop_And64, mkexpr(op2), mkU64(255))));
+ put_gpr_b1(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2), mkU8(8)), mkU64(255))));
+ put_gpr_b2(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2), mkU8(16)), mkU64(255))));
+ put_gpr_b3(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2), mkU8(24)), mkU64(255))));
+ put_gpr_b4(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2), mkU8(32)), mkU64(255))));
+ put_gpr_b5(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2), mkU8(40)), mkU64(255))));
+ put_gpr_b6(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2), mkU8(48)), mkU64(255))));
+ put_gpr_b7(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2), mkU8(56)), mkU64(255))));
+
+ return "lrvg";
+}
+
+/* MVHHI -- store the 16-bit immediate i2 at the first-operand address.  */
+HChar *
+s390_irgen_MVHHI(UShort i2, IRTemp op1addr)
+{
+ store(mkexpr(op1addr), mkU16(i2));
+
+ return "mvhhi";
+}
+
+/* MVHI -- store the 16-bit immediate sign-extended to 32 bits.  */
+HChar *
+s390_irgen_MVHI(UShort i2, IRTemp op1addr)
+{
+ store(mkexpr(op1addr), mkU32((UInt)(Int)(Short)i2));
+
+ return "mvhi";
+}
+
+/* MVGHI -- store the 16-bit immediate sign-extended to 64 bits.  */
+HChar *
+s390_irgen_MVGHI(UShort i2, IRTemp op1addr)
+{
+ store(mkexpr(op1addr), mkU64((ULong)(Long)(Short)i2));
+
+ return "mvghi";
+}
+
+/* MVI -- store the 8-bit immediate i2.  */
+HChar *
+s390_irgen_MVI(UChar i2, IRTemp op1addr)
+{
+ store(mkexpr(op1addr), mkU8(i2));
+
+ return "mvi";
+}
+
+/* MVIY -- long-displacement form of MVI; identical semantics.  */
+HChar *
+s390_irgen_MVIY(UChar i2, IRTemp op1addr)
+{
+ store(mkexpr(op1addr), mkU8(i2));
+
+ return "mviy";
+}
+
+/* MR -- signed 32x32->64 multiply: r1+1 (odd reg) * r2; high half of the
+   product goes to r1, low half to r1+1.  CC unchanged.  */
+HChar *
+s390_irgen_MR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1 + 1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+ return "mr";
+}
+
+/* M -- as MR but the multiplier comes from memory.  */
+HChar *
+s390_irgen_M(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1 + 1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+ return "m";
+}
+
+/* MFY -- long-displacement form of M; identical semantics.  */
+HChar *
+s390_irgen_MFY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1 + 1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+ return "mfy";
+}
+
+/* MH -- multiply r1 by a sign-extended halfword; only the low 32 bits of
+   the product are kept (overflow discarded).  */
+HChar *
+s390_irgen_MH(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I16);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I16, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), unop(Iop_16Sto32, mkexpr(op2))));
+ put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+ return "mh";
+}
+
+/* MHY -- long-displacement form of MH; identical semantics.  */
+HChar *
+s390_irgen_MHY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I16);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I16, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), unop(Iop_16Sto32, mkexpr(op2))));
+ put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+ return "mhy";
+}
+
+/* MHI -- multiply r1 (32-bit) by a sign-extended 16-bit immediate.  */
+HChar *
+s390_irgen_MHI(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Short op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Short)i2;
+ assign(result, binop(Iop_MullS32, mkexpr(op1), unop(Iop_16Sto32, mkU16((UShort)op2))));
+ put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+ return "mhi";
+}
+
+/* MGHI -- multiply r1 (64-bit) by a sign-extended 16-bit immediate; only
+   the low 64 bits of the 128-bit product are kept.  */
+HChar *
+s390_irgen_MGHI(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Short op2;
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Short)i2;
+ assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_16Sto64, mkU16((UShort)op2))));
+ put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+ return "mghi";
+}
+
+/* MLR -- unsigned 32x32->64 multiply of r1+1 by r2; high half to r1, low
+   half to r1+1.  CC unchanged.  */
+HChar *
+s390_irgen_MLR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1 + 1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_MullU32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+ return "mlr";
+}
+
+/* MLGR -- unsigned 64x64->128 multiply of r1+1 by r2; high half to r1,
+   low half to r1+1.  */
+HChar *
+s390_irgen_MLGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1 + 1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_MullU64, mkexpr(op1), mkexpr(op2)));
+ put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(result)));
+ put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(result)));
+
+ return "mlgr";
+}
+
+/* ML -- memory form of MLR.  */
+HChar *
+s390_irgen_ML(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1 + 1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullU32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+ return "ml";
+}
+
+/* MLG -- memory form of MLGR.  */
+HChar *
+s390_irgen_MLG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1 + 1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullU64, mkexpr(op1), mkexpr(op2)));
+ put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(result)));
+ put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(result)));
+
+ return "mlg";
+}
+
+/* MSR -- multiply single: r1 = low 32 bits of r1 * r2 (signed); overflow
+   is discarded and CC is unchanged, as for the whole MS family below.  */
+HChar *
+s390_irgen_MSR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+ return "msr";
+}
+
+/* MSGR -- 64-bit multiply single; low 64 bits of the 128-bit product.  */
+HChar *
+s390_irgen_MSGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_MullS64, mkexpr(op1), mkexpr(op2)));
+ put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+ return "msgr";
+}
+
+/* MSGFR -- 64-bit r1 times sign-extended 32-bit r2.  */
+HChar *
+s390_irgen_MSGFR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_32Sto64, mkexpr(op2))));
+ put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+ return "msgfr";
+}
+
+/* MS -- memory form of MSR.  */
+HChar *
+s390_irgen_MS(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+ return "ms";
+}
+
+/* MSY -- long-displacement form of MS; identical semantics.  */
+HChar *
+s390_irgen_MSY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+ return "msy";
+}
+
+/* MSG -- memory form of MSGR.  */
+HChar *
+s390_irgen_MSG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS64, mkexpr(op1), mkexpr(op2)));
+ put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+ return "msg";
+}
+
+/* MSGF -- memory form of MSGFR (32-bit operand sign-extended).  */
+HChar *
+s390_irgen_MSGF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_32Sto64, mkexpr(op2))));
+ put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+ return "msgf";
+}
+
+/* MSFI -- multiply r1 by a signed 32-bit immediate.  */
+HChar *
+s390_irgen_MSFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ Int op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = (Int)i2;
+ assign(result, binop(Iop_MullS32, mkexpr(op1), mkU32((UInt)op2)));
+ put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+ return "msfi";
+}
+
+/* MSGFI -- multiply 64-bit r1 by a sign-extended 32-bit immediate.  */
+HChar *
+s390_irgen_MSGFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ Int op2;
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (Int)i2;
+ assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_32Sto64, mkU32((UInt)op2))));
+ put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+ return "msgfi";
+}
+
+/* OR -- r1 = r1 | r2 (32-bit).  CC via the BITWISE thunk (0 iff result is
+   zero), as for all OR variants below.  */
+HChar *
+s390_irgen_OR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_Or32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "or";
+}
+
+/* OGR -- 64-bit register OR.  */
+HChar *
+s390_irgen_OGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_Or64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "ogr";
+}
+
+/* ORK -- three-register (distinct-operands) OR: r1 = r2 | r3.  */
+HChar *
+s390_irgen_ORK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Or32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "ork";
+}
+
+/* OGRK -- 64-bit distinct-operands OR: r1 = r2 | r3.  */
+HChar *
+s390_irgen_OGRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Or64, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "ogrk";
+}
+
+/* O -- OR with a 32-bit memory operand.  */
+HChar *
+s390_irgen_O(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Or32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "o";
+}
+
+/* OY -- long-displacement form of O; identical semantics.  */
+HChar *
+s390_irgen_OY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Or32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "oy";
+}
+
+/* OG -- OR with a 64-bit memory operand.  */
+HChar *
+s390_irgen_OG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_Or64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "og";
+}
+
+/* OI -- OR the 8-bit immediate into the byte at op1addr (read/modify/
+   write).  CC via BITWISE on the byte result.  */
+HChar *
+s390_irgen_OI(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I8);
+ UChar op2;
+ IRTemp result = newTemp(Ity_I8);
+
+ assign(op1, load(Ity_I8, mkexpr(op1addr)));
+ op2 = i2;
+ assign(result, binop(Iop_Or8, mkexpr(op1), mkU8(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ store(mkexpr(op1addr), mkexpr(result));
+
+ return "oi";
+}
+
+/* OIY -- long-displacement form of OI; identical semantics.  */
+HChar *
+s390_irgen_OIY(UChar i2, IRTemp op1addr)
+{
+ IRTemp op1 = newTemp(Ity_I8);
+ UChar op2;
+ IRTemp result = newTemp(Ity_I8);
+
+ assign(op1, load(Ity_I8, mkexpr(op1addr)));
+ op2 = i2;
+ assign(result, binop(Iop_Or8, mkexpr(op1), mkU8(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ store(mkexpr(op1addr), mkexpr(result));
+
+ return "oiy";
+}
+
+/* OIHF -- OR a 32-bit immediate into the HIGH word of r1; CC from that
+   32-bit result only.  */
+HChar *
+s390_irgen_OIHF(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w0(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Or32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "oihf";
+}
+
+/* OIHH -- OR a 16-bit immediate into halfword 0 (bits 0-15) of r1.  */
+HChar *
+s390_irgen_OIHH(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw0(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw0(r1, mkexpr(result));
+
+ return "oihh";
+}
+
+/* OIHL -- OR a 16-bit immediate into halfword 1 (bits 16-31) of r1.  */
+HChar *
+s390_irgen_OIHL(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw1(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw1(r1, mkexpr(result));
+
+ return "oihl";
+}
+
+/* OILF -- OR a 32-bit immediate into the LOW word of r1.  */
+HChar *
+s390_irgen_OILF(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Or32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "oilf";
+}
+
+/* OILH -- OR a 16-bit immediate into halfword 2 (bits 32-47) of r1.  */
+HChar *
+s390_irgen_OILH(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw2(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw2(r1, mkexpr(result));
+
+ return "oilh";
+}
+
+/* OILL -- OR a 16-bit immediate into halfword 3 (bits 48-63) of r1.  */
+HChar *
+s390_irgen_OILL(UChar r1, UShort i2)
+{
+ IRTemp op1 = newTemp(Ity_I16);
+ UShort op2;
+ IRTemp result = newTemp(Ity_I16);
+
+ assign(op1, get_gpr_hw3(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+ put_gpr_hw3(r1, mkexpr(result));
+
+ return "oill";
+}
+
+/* PFD -- prefetch data: a performance hint with no architecturally
+   visible effect, so it is translated to no IR at all.  */
+HChar *
+s390_irgen_PFD(void)
+{
+
+ return "pfd";
+}
+
+/* PFDRL -- relative-long prefetch; likewise a no-op.  */
+HChar *
+s390_irgen_PFDRL(void)
+{
+
+ return "pfdrl";
+}
+
+/* RLL -- rotate left logical: r1 = r3 rotated left by the low 5 bits of
+   the second-operand address, built as (x << n) | (x >> (32 - n)).
+   NOTE(review): when the amount is 0 the right shift count becomes 32,
+   which VEX leaves undefined for Iop_Shr32 -- confirm the backends and
+   optimiser tolerate this (the left-shift term alone already yields the
+   correct result in that case).  */
+HChar *
+s390_irgen_RLL(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp amount = newTemp(Ity_I64);
+ IRTemp op = newTemp(Ity_I32);
+
+ assign(amount, binop(Iop_And64, mkexpr(op2addr), mkU64(31)));
+ assign(op, get_gpr_w1(r3));
+ put_gpr_w1(r1, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(op), unop(Iop_64to8, mkexpr(amount))), binop(Iop_Shr32, mkexpr(op), unop(Iop_64to8, binop(Iop_Sub64, mkU64(32), mkexpr(amount))))));
+
+ return "rll";
+}
+
+/* RLLG -- 64-bit rotate left logical; amount is the low 6 address bits.
+   Same NOTE(review) as RLL for a zero amount (shift count 64).  */
+HChar *
+s390_irgen_RLLG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp amount = newTemp(Ity_I64);
+ IRTemp op = newTemp(Ity_I64);
+
+ assign(amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+ assign(op, get_gpr_dw0(r3));
+ put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(op), unop(Iop_64to8, mkexpr(amount))), binop(Iop_Shr64, mkexpr(op), unop(Iop_64to8, binop(Iop_Sub64, mkU64(64), mkexpr(amount))))));
+
+ return "rllg";
+}
+
+/* RNSBG -- rotate r2 left by i5, AND it with r1, and insert the bits
+   selected by the i3..i4 bit range back into r1.  'mask' has 1s in the
+   selected range (wrapping when from > to), 'maskc' is its complement.
+   If the test bit (bit 0 of i3) is set, r1 is left unchanged and only the
+   CC is set.  CC via BITWISE on the selected bits.  */
+HChar *
+s390_irgen_RNSBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+ UChar from;
+ UChar to;
+ UChar rot;
+ UChar t_bit;
+ ULong mask;
+ ULong maskc;
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ from = i3 & 63;
+ to = i4 & 63;
+ rot = i5 & 63;
+ t_bit = i3 & 128;
+ assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64, get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2), mkU8(64 - rot))));
+ if (from <= to) {
+ mask = ~0ULL;
+ mask = (mask >> from) & (mask << (63 - to));
+ maskc = ~mask;
+ } else {
+ maskc = ~0ULL;
+ maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+ mask = ~maskc;
+ }
+ assign(result, binop(Iop_And64, binop(Iop_And64, get_gpr_dw0(r1), mkexpr(op2)), mkU64(mask)));
+ if (t_bit == 0) {
+ put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1), mkU64(maskc)), mkexpr(result)));
+ }
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+ return "rnsbg";
+}
+
+/* RXSBG -- as RNSBG but combining with XOR instead of AND.  */
+HChar *
+s390_irgen_RXSBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+ UChar from;
+ UChar to;
+ UChar rot;
+ UChar t_bit;
+ ULong mask;
+ ULong maskc;
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ from = i3 & 63;
+ to = i4 & 63;
+ rot = i5 & 63;
+ t_bit = i3 & 128;
+ assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64, get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2), mkU8(64 - rot))));
+ if (from <= to) {
+ mask = ~0ULL;
+ mask = (mask >> from) & (mask << (63 - to));
+ maskc = ~mask;
+ } else {
+ maskc = ~0ULL;
+ maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+ mask = ~maskc;
+ }
+ assign(result, binop(Iop_And64, binop(Iop_Xor64, get_gpr_dw0(r1), mkexpr(op2)), mkU64(mask)));
+ if (t_bit == 0) {
+ put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1), mkU64(maskc)), mkexpr(result)));
+ }
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+ return "rxsbg";
+}
+
+/* ROSBG -- as RNSBG but combining with OR instead of AND.  */
+HChar *
+s390_irgen_ROSBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+ UChar from;
+ UChar to;
+ UChar rot;
+ UChar t_bit;
+ ULong mask;
+ ULong maskc;
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+
+ from = i3 & 63;
+ to = i4 & 63;
+ rot = i5 & 63;
+ t_bit = i3 & 128;
+ assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64, get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2), mkU8(64 - rot))));
+ if (from <= to) {
+ mask = ~0ULL;
+ mask = (mask >> from) & (mask << (63 - to));
+ maskc = ~mask;
+ } else {
+ maskc = ~0ULL;
+ maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+ mask = ~maskc;
+ }
+ assign(result, binop(Iop_And64, binop(Iop_Or64, get_gpr_dw0(r1), mkexpr(op2)), mkU64(mask)));
+ if (t_bit == 0) {
+ put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1), mkU64(maskc)), mkexpr(result)));
+ }
+ s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+ return "rosbg";
+}
+
+/* RISBG -- rotate r2 left by i5 and insert the bits selected by the
+   i3..i4 range into r1; with the zero bit (bit 0 of i4) set, the
+   unselected bits of r1 are zeroed instead of preserved.  'mask' has 1s
+   in the selected (possibly wrapping) range, 'maskc' is its complement.
+   Fix: CC is set from the final 64-bit result in r1 (signed load-and-
+   test), not from the rotated second operand -- the previous code
+   assigned 'result' but then passed the unrelated 'op2' to the thunk,
+   leaving 'result' dead.  */
+HChar *
+s390_irgen_RISBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+ UChar from;
+ UChar to;
+ UChar rot;
+ UChar z_bit;
+ ULong mask;
+ ULong maskc;
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ from = i3 & 63;
+ to = i4 & 63;
+ rot = i5 & 63;
+ z_bit = i4 & 128;
+ assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64, get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2), mkU8(64 - rot))));
+ if (from <= to) {
+ mask = ~0ULL;
+ mask = (mask >> from) & (mask << (63 - to));
+ maskc = ~mask;
+ } else {
+ maskc = ~0ULL;
+ maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+ mask = ~maskc;
+ }
+ if (z_bit == 0) {
+ put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1), mkU64(maskc)), binop(Iop_And64, mkexpr(op2), mkU64(mask))));
+ } else {
+ put_gpr_dw0(r1, binop(Iop_And64, mkexpr(op2), mkU64(mask)));
+ }
+ assign(result, get_gpr_dw0(r1));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+ return "risbg";
+}
+
+/* SAR -- set access register a(r1) from the low word of gpr r2.  Unlike
+   the other irgen routines here it emits its own disassembly trace line
+   when front-end tracing is enabled.  */
+HChar *
+s390_irgen_SAR(UChar r1, UChar r2)
+{
+ put_ar_w0(r1, get_gpr_w1(r2));
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ s390_disasm(ENC3(MNM, AR, GPR), "sar", r1, r2);
+
+ return "sar";
+}
+
+/* SLDA -- shift left double arithmetic.
+   The even/odd register pair r1:r1+1 (low words) is treated as one
+   64-bit signed value, shifted left by the low 6 bits of the
+   second-operand address; the sign bit (bit 63 of the combined value)
+   is preserved across the shift, and the halves are written back. */
+HChar *
+s390_irgen_SLDA(UChar r1, IRTemp op2addr)
+{
+ IRTemp p1 = newTemp(Ity_I64);
+ IRTemp p2 = newTemp(Ity_I64);
+ IRTemp op = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+ Long sign_mask;
+ IRTemp shift_amount = newTemp(Ity_I64);
+
+ /* Assemble the 64-bit operand from the register pair. */
+ assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+ assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+ assign(op, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1), mkU8(32)), mkexpr(p2)));
+ sign_mask = 1ULL << 63;
+ /* Only the low 6 bits of the address form the shift amount. */
+ assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+ /* Shift, then splice the original sign bit back in. */
+ assign(result, binop(Iop_Or64, binop(Iop_And64, binop(Iop_Shl64, mkexpr(op), unop(Iop_64to8, mkexpr(shift_amount))), mkU64((ULong)(~sign_mask))), binop(Iop_And64, mkexpr(op), mkU64((ULong)sign_mask))));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+ /* CC is computed later from the unshifted operand and the amount. */
+ s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_64, op, shift_amount);
+
+ return "slda";
+}
+
+HChar *
+s390_irgen_SLDL(UChar r1, IRTemp op2addr)
+{
+ IRTemp p1 = newTemp(Ity_I64);
+ IRTemp p2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+ assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+ assign(result, binop(Iop_Shl64, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1), mkU8(32)), mkexpr(p2)), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+ return "sldl";
+}
+
+/* SLA -- shift left single arithmetic (32-bit).
+   The low word of r1 is shifted left by the low 6 bits of the
+   second-operand address, with the sign bit preserved.
+   NOTE(review): the amount is masked with 63 but fed to Iop_Shl32;
+   confirm that VEX defines Iop_Shl32 for amounts >= 32 (shift amounts
+   32..63 are architecturally valid for SLA). */
+HChar *
+s390_irgen_SLA(UChar r1, IRTemp op2addr)
+{
+ IRTemp uop = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+ UInt sign_mask;
+ IRTemp shift_amount = newTemp(Ity_I64);
+ IRTemp op = newTemp(Ity_I32);
+
+ assign(op, get_gpr_w1(r1));
+ assign(uop, get_gpr_w1(r1));
+ sign_mask = 2147483648U;  /* 0x80000000: bit 32 of the register */
+ assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+ /* Shift, then splice the original sign bit back in. */
+ assign(result, binop(Iop_Or32, binop(Iop_And32, binop(Iop_Shl32, mkexpr(uop), unop(Iop_64to8, mkexpr(shift_amount))), mkU32(~sign_mask)), binop(Iop_And32, mkexpr(uop), mkU32(sign_mask))));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_32, op, shift_amount);
+
+ return "sla";
+}
+
+HChar *
+s390_irgen_SLAK(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp uop = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+ UInt sign_mask;
+ IRTemp shift_amount = newTemp(Ity_I64);
+ IRTemp op = newTemp(Ity_I32);
+
+ assign(op, get_gpr_w1(r3));
+ assign(uop, get_gpr_w1(r3));
+ sign_mask = 2147483648U;
+ assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+ assign(result, binop(Iop_Or32, binop(Iop_And32, binop(Iop_Shl32, mkexpr(uop), unop(Iop_64to8, mkexpr(shift_amount))), mkU32(~sign_mask)), binop(Iop_And32, mkexpr(uop), mkU32(sign_mask))));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_32, op, shift_amount);
+
+ return "slak";
+}
+
+HChar *
+s390_irgen_SLAG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp uop = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+ ULong sign_mask;
+ IRTemp shift_amount = newTemp(Ity_I64);
+ IRTemp op = newTemp(Ity_I64);
+
+ assign(op, get_gpr_dw0(r3));
+ assign(uop, get_gpr_dw0(r3));
+ sign_mask = 9223372036854775808ULL;
+ assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+ assign(result, binop(Iop_Or64, binop(Iop_And64, binop(Iop_Shl64, mkexpr(uop), unop(Iop_64to8, mkexpr(shift_amount))), mkU64(~sign_mask)), binop(Iop_And64, mkexpr(uop), mkU64(sign_mask))));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_64, op, shift_amount);
+
+ return "slag";
+}
+
+HChar *
+s390_irgen_SLL(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, binop(Iop_Shl32, get_gpr_w1(r1), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+ return "sll";
+}
+
+HChar *
+s390_irgen_SLLK(UChar r1, UChar r3, IRTemp op2addr)
+{
+ put_gpr_w1(r1, binop(Iop_Shl32, get_gpr_w1(r3), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+ return "sllk";
+}
+
+HChar *
+s390_irgen_SLLG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ put_gpr_dw0(r1, binop(Iop_Shl64, get_gpr_dw0(r3), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+ return "sllg";
+}
+
+/* SRDA -- shift right double arithmetic.
+   The register pair r1:r1+1 (low words) forms a 64-bit signed value
+   which is arithmetically shifted right by the low 6 bits of the
+   second-operand address.  CC is a signed load-and-test of the result. */
+HChar *
+s390_irgen_SRDA(UChar r1, IRTemp op2addr)
+{
+ IRTemp p1 = newTemp(Ity_I64);
+ IRTemp p2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+ assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+ /* Combine pair, then Iop_Sar64 replicates the sign bit. */
+ assign(result, binop(Iop_Sar64, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1), mkU8(32)), mkexpr(p2)), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+ return "srda";
+}
+
+HChar *
+s390_irgen_SRDL(UChar r1, IRTemp op2addr)
+{
+ IRTemp p1 = newTemp(Ity_I64);
+ IRTemp p2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+ assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+ assign(result, binop(Iop_Shr64, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1), mkU8(32)), mkexpr(p2)), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+ return "srdl";
+}
+
+HChar *
+s390_irgen_SRA(UChar r1, IRTemp op2addr)
+{
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp op = newTemp(Ity_I32);
+
+ assign(op, get_gpr_w1(r1));
+ assign(result, binop(Iop_Sar32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+ return "sra";
+}
+
+HChar *
+s390_irgen_SRAK(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp op = newTemp(Ity_I32);
+
+ assign(op, get_gpr_w1(r3));
+ assign(result, binop(Iop_Sar32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+ return "srak";
+}
+
+HChar *
+s390_irgen_SRAG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp op = newTemp(Ity_I64);
+
+ assign(op, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Sar64, mkexpr(op), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+ return "srag";
+}
+
+HChar *
+s390_irgen_SRL(UChar r1, IRTemp op2addr)
+{
+ IRTemp op = newTemp(Ity_I32);
+
+ assign(op, get_gpr_w1(r1));
+ put_gpr_w1(r1, binop(Iop_Shr32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+ return "srl";
+}
+
+HChar *
+s390_irgen_SRLK(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp op = newTemp(Ity_I32);
+
+ assign(op, get_gpr_w1(r3));
+ put_gpr_w1(r1, binop(Iop_Shr32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+ return "srlk";
+}
+
+HChar *
+s390_irgen_SRLG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRTemp op = newTemp(Ity_I64);
+
+ assign(op, get_gpr_dw0(r3));
+ put_gpr_dw0(r1, binop(Iop_Shr64, mkexpr(op), unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+ return "srlg";
+}
+
+/* ST -- store the low word of r1 at the second-operand address. */
+HChar *
+s390_irgen_ST(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_w1(r1));
+
+ return "st";
+}
+
+/* STY -- long-displacement form of ST; identical semantics. */
+HChar *
+s390_irgen_STY(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_w1(r1));
+
+ return "sty";
+}
+
+/* STG -- store the full 64-bit r1 at the second-operand address. */
+HChar *
+s390_irgen_STG(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_dw0(r1));
+
+ return "stg";
+}
+
+/* STRL -- store relative long: store the low word of r1 at
+   IA + 2 * sign-extended(i2) (i2 counts halfwords). */
+HChar *
+s390_irgen_STRL(UChar r1, UInt i2)
+{
+ store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)), get_gpr_w1(r1));
+
+ return "strl";
+}
+
+/* STGRL -- as STRL, but stores the full 64-bit r1. */
+HChar *
+s390_irgen_STGRL(UChar r1, UInt i2)
+{
+ store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)), get_gpr_dw0(r1));
+
+ return "stgrl";
+}
+
+HChar *
+s390_irgen_STC(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_b7(r1));
+
+ return "stc";
+}
+
+HChar *
+s390_irgen_STCY(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_b7(r1));
+
+ return "stcy";
+}
+
+HChar *
+s390_irgen_STCH(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_b3(r1));
+
+ return "stch";
+}
+
+/* STCM -- store characters under mask.
+   For each set bit in the 4-bit mask (r3 field), the corresponding byte
+   of the low word of r1 (bytes 4..7, left to right) is stored to
+   successive byte addresses starting at the second-operand address.
+   With mask == 0 nothing is stored. */
+HChar *
+s390_irgen_STCM(UChar r1, UChar r3, IRTemp op2addr)
+{
+ UChar mask;
+ UChar n;
+
+ mask = (UChar)r3;
+ n = 0;  /* running offset: bytes are packed, not positionally stored */
+ if ((mask & 8) != 0) {
+ store(mkexpr(op2addr), get_gpr_b4(r1));
+ n = n + 1;
+ }
+ if ((mask & 4) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b5(r1));
+ n = n + 1;
+ }
+ if ((mask & 2) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b6(r1));
+ n = n + 1;
+ }
+ if ((mask & 1) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b7(r1));
+ }
+
+ return "stcm";
+}
+
+HChar *
+s390_irgen_STCMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+ UChar mask;
+ UChar n;
+
+ mask = (UChar)r3;
+ n = 0;
+ if ((mask & 8) != 0) {
+ store(mkexpr(op2addr), get_gpr_b4(r1));
+ n = n + 1;
+ }
+ if ((mask & 4) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b5(r1));
+ n = n + 1;
+ }
+ if ((mask & 2) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b6(r1));
+ n = n + 1;
+ }
+ if ((mask & 1) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b7(r1));
+ }
+
+ return "stcmy";
+}
+
+HChar *
+s390_irgen_STCMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+ UChar mask;
+ UChar n;
+
+ mask = (UChar)r3;
+ n = 0;
+ if ((mask & 8) != 0) {
+ store(mkexpr(op2addr), get_gpr_b0(r1));
+ n = n + 1;
+ }
+ if ((mask & 4) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b1(r1));
+ n = n + 1;
+ }
+ if ((mask & 2) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b2(r1));
+ n = n + 1;
+ }
+ if ((mask & 1) != 0) {
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b3(r1));
+ }
+
+ return "stcmh";
+}
+
+HChar *
+s390_irgen_STH(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_hw3(r1));
+
+ return "sth";
+}
+
+HChar *
+s390_irgen_STHY(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_hw3(r1));
+
+ return "sthy";
+}
+
+HChar *
+s390_irgen_STHRL(UChar r1, UInt i2)
+{
+ store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)), get_gpr_hw3(r1));
+
+ return "sthrl";
+}
+
+HChar *
+s390_irgen_STHH(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_hw1(r1));
+
+ return "sthh";
+}
+
+HChar *
+s390_irgen_STFH(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_w0(r1));
+
+ return "stfh";
+}
+
+HChar *
+s390_irgen_STPQ(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_dw0(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(8)), get_gpr_dw0(r1 + 1));
+
+ return "stpq";
+}
+
+HChar *
+s390_irgen_STRVH(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_b7(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(1)), get_gpr_b6(r1));
+
+ return "strvh";
+}
+
+/* STRV -- store reversed (32-bit): the low word of r1 is stored
+   byte-swapped, one byte store per memory byte (b7 first). */
+HChar *
+s390_irgen_STRV(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_b7(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(1)), get_gpr_b6(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(2)), get_gpr_b5(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(3)), get_gpr_b4(r1));
+
+ return "strv";
+}
+
+/* STRVG -- store reversed (64-bit): all eight bytes of r1 are stored
+   in reverse order (b7 at the lowest address, b0 at the highest). */
+HChar *
+s390_irgen_STRVG(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_gpr_b7(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(1)), get_gpr_b6(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(2)), get_gpr_b5(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(3)), get_gpr_b4(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(4)), get_gpr_b3(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(5)), get_gpr_b2(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(6)), get_gpr_b1(r1));
+ store(binop(Iop_Add64, mkexpr(op2addr), mkU64(7)), get_gpr_b0(r1));
+
+ return "strvg";
+}
+
+/* SR -- subtract (32-bit): r1.lo := r1.lo - r2.lo; CC from signed
+   subtract of the two operands (thunk recomputes the result later). */
+HChar *
+s390_irgen_SR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ /* Thunk must capture the operands before r1 is overwritten. */
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "sr";
+}
+
+/* SGR -- subtract (64-bit): r1 := r1 - r2, signed CC. */
+HChar *
+s390_irgen_SGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "sgr";
+}
+
+/* SGFR -- subtract: r1 := r1 - sign-extended low word of r2. */
+HChar *
+s390_irgen_SGFR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "sgfr";
+}
+
+HChar *
+s390_irgen_SRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op2, op3);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "srk";
+}
+
+HChar *
+s390_irgen_SGRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Sub64, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op2, op3);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "sgrk";
+}
+
+HChar *
+s390_irgen_S(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "s";
+}
+
+HChar *
+s390_irgen_SY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "sy";
+}
+
+HChar *
+s390_irgen_SG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "sg";
+}
+
+HChar *
+s390_irgen_SGF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "sgf";
+}
+
+HChar *
+s390_irgen_SH(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "sh";
+}
+
+HChar *
+s390_irgen_SHY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "shy";
+}
+
+HChar *
+s390_irgen_SHHHR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r1));
+ assign(op3, get_gpr_w0(r2));
+ assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "shhhr";
+}
+
+HChar *
+s390_irgen_SHHLR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r1));
+ assign(op3, get_gpr_w1(r2));
+ assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "shhlr";
+}
+
+HChar *
+s390_irgen_SLR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "slr";
+}
+
+HChar *
+s390_irgen_SLGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slgr";
+}
+
+HChar *
+s390_irgen_SLGFR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Uto64, get_gpr_w1(r2)));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slgfr";
+}
+
+HChar *
+s390_irgen_SLRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ assign(op3, get_gpr_w1(r3));
+ assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op2, op3);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "slrk";
+}
+
+HChar *
+s390_irgen_SLGRK(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ assign(op3, get_gpr_dw0(r3));
+ assign(result, binop(Iop_Sub64, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op2, op3);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slgrk";
+}
+
+HChar *
+s390_irgen_SL(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "sl";
+}
+
+HChar *
+s390_irgen_SLY(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, op2);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "sly";
+}
+
+HChar *
+s390_irgen_SLG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slg";
+}
+
+HChar *
+s390_irgen_SLGF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slgf";
+}
+
+HChar *
+s390_irgen_SLFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ UInt op2;
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ op2 = i2;
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkU32(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, mktemp(Ity_I32, mkU32(op2)));
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "slfi";
+}
+
+HChar *
+s390_irgen_SLGFI(UChar r1, UInt i2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ ULong op2;
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ op2 = (ULong)i2;
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkU64(op2)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, mktemp(Ity_I64, mkU64(op2)));
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slgfi";
+}
+
+HChar *
+s390_irgen_SLHHHR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r1));
+ assign(op3, get_gpr_w0(r2));
+ assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "slhhhr";
+}
+
+HChar *
+s390_irgen_SLHHLR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w0(r1));
+ assign(op3, get_gpr_w1(r2));
+ assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+ s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op2, op3);
+ put_gpr_w0(r1, mkexpr(result));
+
+ return "slhhlr";
+}
+
+/* SLBR -- subtract logical with borrow (32-bit).
+   result := op1 - op2 - borrow_in, where borrow_in is derived from the
+   current condition code as 1 - (cc >> 1): CC values 0/1 (borrow
+   occurred on the previous subtract) give borrow_in = 1, CC 2/3 give 0.
+   The thunk records all three inputs for later CC computation. */
+HChar *
+s390_irgen_SLBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp borrow_in = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, get_gpr_w1(r2));
+ /* borrow_in = 1 - bit 1 of the live condition code. */
+ assign(borrow_in, binop(Iop_Sub32, mkU32(1), binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1))));
+ assign(result, binop(Iop_Sub32, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)), mkexpr(borrow_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_32, op1, op2, borrow_in);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "slbr";
+}
+
+/* SLBGR -- 64-bit variant of SLBR; the borrow is computed in 32 bits
+   from the CC and zero-extended before the 64-bit subtract. */
+HChar *
+s390_irgen_SLBGR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp borrow_in = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, get_gpr_dw0(r2));
+ assign(borrow_in, unop(Iop_32Uto64, binop(Iop_Sub32, mkU32(1), binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)))));
+ assign(result, binop(Iop_Sub64, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)), mkexpr(borrow_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_64, op1, op2, borrow_in);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slbgr";
+}
+
+HChar *
+s390_irgen_SLB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp op2 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp borrow_in = newTemp(Ity_I32);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+ assign(borrow_in, binop(Iop_Sub32, mkU32(1), binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1))));
+ assign(result, binop(Iop_Sub32, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)), mkexpr(borrow_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_32, op1, op2, borrow_in);
+ put_gpr_w1(r1, mkexpr(result));
+
+ return "slb";
+}
+
+HChar *
+s390_irgen_SLBG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp op2 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp borrow_in = newTemp(Ity_I64);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+ assign(borrow_in, unop(Iop_32Uto64, binop(Iop_Sub32, mkU32(1), binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)))));
+ assign(result, binop(Iop_Sub64, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)), mkexpr(borrow_in)));
+ s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_64, op1, op2, borrow_in);
+ put_gpr_dw0(r1, mkexpr(result));
+
+ return "slbg";
+}
+
+/* SVC -- supervisor call (Linux system call).
+   A non-zero immediate is the system call number; with immediate 0 the
+   number is taken from the low word of GPR 1 (the svc-0 convention). */
+HChar *
+s390_irgen_SVC(UChar i)
+{
+ IRTemp sysno = newTemp(Ity_I64);
+
+ if (i != 0) {
+ assign(sysno, mkU64(i));
+ } else {
+ assign(sysno, unop(Iop_32Uto64, get_gpr_w1(1)));
+ }
+ system_call(mkexpr(sysno));
+
+ return "svc";
+}
+
+/* TS -- test and set: CC is derived from the byte at the operand
+   address, then the byte is set to all ones (0xff).
+   NOTE(review): emitted as a plain load followed by a store -- the
+   architectural interlocked (atomic) update is not modelled here;
+   confirm this is acceptable for the serialization model in use. */
+HChar *
+s390_irgen_TS(IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_I8);
+
+ assign(value, load(Ity_I8, mkexpr(op2addr)));
+ s390_cc_thunk_putZ(S390_CC_OP_TEST_AND_SET, value);
+ store(mkexpr(op2addr), mkU8(255));
+
+ return "ts";
+}
+
+/* TM -- test under mask: CC from the memory byte ANDed with the
+   immediate mask (computed later by the CC thunk). */
+HChar *
+s390_irgen_TM(UChar i2, IRTemp op1addr)
+{
+ UChar mask;
+ IRTemp value = newTemp(Ity_I8);
+
+ mask = i2;
+ assign(value, load(Ity_I8, mkexpr(op1addr)));
+ s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_8, value, mktemp(Ity_I8, mkU8(mask)));
+
+ return "tm";
+}
+
+/* TMY -- long-displacement form of TM; identical semantics. */
+HChar *
+s390_irgen_TMY(UChar i2, IRTemp op1addr)
+{
+ UChar mask;
+ IRTemp value = newTemp(Ity_I8);
+
+ mask = i2;
+ assign(value, load(Ity_I8, mkexpr(op1addr)));
+ s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_8, value, mktemp(Ity_I8, mkU8(mask)));
+
+ return "tmy";
+}
+
+HChar *
+s390_irgen_TMHH(UChar r1, UShort i2)
+{
+ UShort mask;
+ IRTemp value = newTemp(Ity_I16);
+
+ mask = i2;
+ assign(value, get_gpr_hw0(r1));
+ s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16, mkU16(mask)));
+
+ return "tmhh";
+}
+
+HChar *
+s390_irgen_TMHL(UChar r1, UShort i2)
+{
+ UShort mask;
+ IRTemp value = newTemp(Ity_I16);
+
+ mask = i2;
+ assign(value, get_gpr_hw1(r1));
+ s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16, mkU16(mask)));
+
+ return "tmhl";
+}
+
+HChar *
+s390_irgen_TMLH(UChar r1, UShort i2)
+{
+ UShort mask;
+ IRTemp value = newTemp(Ity_I16);
+
+ mask = i2;
+ assign(value, get_gpr_hw2(r1));
+ s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16, mkU16(mask)));
+
+ return "tmlh";
+}
+
+HChar *
+s390_irgen_TMLL(UChar r1, UShort i2)
+{
+ UShort mask;
+ IRTemp value = newTemp(Ity_I16);
+
+ mask = i2;
+ assign(value, get_gpr_hw3(r1));
+ s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16, mkU16(mask)));
+
+ return "tmll";
+}
+
+HChar *
+s390_irgen_EFPC(UChar r1)
+{
+ put_gpr_w1(r1, get_fpc_w0());
+
+ return "efpc";
+}
+
+HChar *
+s390_irgen_LER(UChar r1, UChar r2)
+{
+ put_fpr_w0(r1, get_fpr_w0(r2));
+
+ return "ler";
+}
+
+HChar *
+s390_irgen_LDR(UChar r1, UChar r2)
+{
+ put_fpr_dw0(r1, get_fpr_dw0(r2));
+
+ return "ldr";
+}
+
+HChar *
+s390_irgen_LXR(UChar r1, UChar r2)
+{
+ put_fpr_dw0(r1, get_fpr_dw0(r2));
+ put_fpr_dw0(r1 + 2, get_fpr_dw0(r2 + 2));
+
+ return "lxr";
+}
+
+HChar *
+s390_irgen_LE(UChar r1, IRTemp op2addr)
+{
+ put_fpr_w0(r1, load(Ity_F32, mkexpr(op2addr)));
+
+ return "le";
+}
+
+HChar *
+s390_irgen_LD(UChar r1, IRTemp op2addr)
+{
+ put_fpr_dw0(r1, load(Ity_F64, mkexpr(op2addr)));
+
+ return "ld";
+}
+
+HChar *
+s390_irgen_LEY(UChar r1, IRTemp op2addr)
+{
+ put_fpr_w0(r1, load(Ity_F32, mkexpr(op2addr)));
+
+ return "ley";
+}
+
+HChar *
+s390_irgen_LDY(UChar r1, IRTemp op2addr)
+{
+ put_fpr_dw0(r1, load(Ity_F64, mkexpr(op2addr)));
+
+ return "ldy";
+}
+
+HChar *
+s390_irgen_LFPC(IRTemp op2addr)
+{
+ put_fpc_w0(load(Ity_I32, mkexpr(op2addr)));
+
+ return "lfpc";
+}
+
+HChar *
+s390_irgen_LZER(UChar r1)
+{
+ put_fpr_w0(r1, mkF32i(0x0));
+
+ return "lzer";
+}
+
+HChar *
+s390_irgen_LZDR(UChar r1)
+{
+ put_fpr_dw0(r1, mkF64i(0x0));
+
+ return "lzdr";
+}
+
+HChar *
+s390_irgen_LZXR(UChar r1)
+{
+ put_fpr_dw0(r1, mkF64i(0x0));
+ put_fpr_dw0(r1 + 2, mkF64i(0x0));
+
+ return "lzxr";
+}
+
+/* SRNM -- set BFP rounding mode: the low 2 bits of the second-operand
+   address replace the low 2 bits of the FPC; all other FPC bits are
+   left unchanged. */
+HChar *
+s390_irgen_SRNM(IRTemp op2addr)
+{
+ UInt mask;
+
+ mask = 3;  /* rounding-mode field occupies FPC bits 30-31 */
+ put_fpc_w0(binop(Iop_Or32, binop(Iop_And32, get_fpc_w0(), mkU32(~mask)), binop(Iop_And32, unop(Iop_64to32, mkexpr(op2addr)), mkU32(mask))));
+
+ return "srnm";
+}
+
+HChar *
+s390_irgen_SFPC(UChar r1)
+{
+ put_fpc_w0(get_gpr_w1(r1));
+
+ return "sfpc";
+}
+
+HChar *
+s390_irgen_STE(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_fpr_w0(r1));
+
+ return "ste";
+}
+
+HChar *
+s390_irgen_STD(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_fpr_dw0(r1));
+
+ return "std";
+}
+
+HChar *
+s390_irgen_STEY(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_fpr_w0(r1));
+
+ return "stey";
+}
+
+HChar *
+s390_irgen_STDY(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_fpr_dw0(r1));
+
+ return "stdy";
+}
+
+HChar *
+s390_irgen_STFPC(IRTemp op2addr)
+{
+ store(mkexpr(op2addr), get_fpc_w0());
+
+ return "stfpc";
+}
+
+HChar *
+s390_irgen_AEBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, get_fpr_w0(r2));
+ assign(result, triop(Iop_AddF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "aebr";
+}
+
+HChar *
+s390_irgen_ADBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, get_fpr_dw0(r2));
+ assign(result, triop(Iop_AddF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "adbr";
+}
+
+HChar *
+s390_irgen_AEB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, load(Ity_F32, mkexpr(op2addr)));
+ assign(result, triop(Iop_AddF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "aeb";
+}
+
+HChar *
+s390_irgen_ADB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, load(Ity_F64, mkexpr(op2addr)));
+ assign(result, triop(Iop_AddF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "adb";
+}
+
+HChar *
+s390_irgen_CEFBR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ put_fpr_w0(r1, binop(Iop_I32StoF32, mkU32(Irrm_CURRENT), mkexpr(op2)));
+
+ return "cefbr";
+}
+
+HChar *
+s390_irgen_CDFBR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ put_fpr_dw0(r1, unop(Iop_I32StoF64, mkexpr(op2)));
+
+ return "cdfbr";
+}
+
+HChar *
+s390_irgen_CEGBR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ put_fpr_w0(r1, binop(Iop_I64StoF32, mkU32(Irrm_CURRENT), mkexpr(op2)));
+
+ return "cegbr";
+}
+
+HChar *
+s390_irgen_CDGBR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ put_fpr_dw0(r1, binop(Iop_I64StoF64, mkU32(Irrm_CURRENT), mkexpr(op2)));
+
+ return "cdgbr";
+}
+
+/* CFEBR -- convert short BFP (r2) to a 32-bit signed integer in r1.
+   The rounding mode comes from the r3 mask field, translated by
+   encode_rounding_mode().  CC is derived from the source FP value. */
+HChar *
+s390_irgen_CFEBR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op, get_fpr_w0(r2));
+ assign(result, binop(Iop_F32toI32S, mkU32(encode_rounding_mode(r3)), mkexpr(op)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_32_TO_INT_32, op);
+
+ return "cfebr";
+}
+
+HChar *
+s390_irgen_CFDBR(UChar r3, UChar r1, UChar r2)
+{
+ /* CFDBR: convert long BFP in r2 to signed 32-bit int in r1;
+ rounding per M3 mask in r3; CC thunk from the source operand. */
+ IRTemp op = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op, get_fpr_dw0(r2));
+ assign(result, binop(Iop_F64toI32S, mkU32(encode_rounding_mode(r3)), mkexpr(op)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_64_TO_INT_32, op);
+
+ return "cfdbr";
+}
+
+HChar *
+s390_irgen_CGEBR(UChar r3, UChar r1, UChar r2)
+{
+ /* CGEBR: convert short BFP in r2 to signed 64-bit int in r1;
+ rounding per M3 mask in r3; CC thunk from the source operand. */
+ IRTemp op = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op, get_fpr_w0(r2));
+ assign(result, binop(Iop_F32toI64S, mkU32(encode_rounding_mode(r3)), mkexpr(op)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_32_TO_INT_64, op);
+
+ return "cgebr";
+}
+
+HChar *
+s390_irgen_CGDBR(UChar r3, UChar r1, UChar r2)
+{
+ /* CGDBR: convert long BFP in r2 to signed 64-bit int in r1;
+ rounding per M3 mask in r3; CC thunk from the source operand. */
+ IRTemp op = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op, get_fpr_dw0(r2));
+ assign(result, binop(Iop_F64toI64S, mkU32(encode_rounding_mode(r3)), mkexpr(op)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_64_TO_INT_64, op);
+
+ return "cgdbr";
+}
+
+HChar *
+s390_irgen_DEBR(UChar r1, UChar r2)
+{
+ /* DEBR: BFP divide, short format, register operands: r1 <- r1 / r2.
+ Note: no CC thunk update here (the condition code is left alone). */
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, get_fpr_w0(r2));
+ assign(result, triop(Iop_DivF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "debr";
+}
+
+HChar *
+s390_irgen_DDBR(UChar r1, UChar r2)
+{
+ /* DDBR: BFP divide, long format, register operands: r1 <- r1 / r2.
+ CC is left unchanged (no thunk update). */
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, get_fpr_dw0(r2));
+ assign(result, triop(Iop_DivF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "ddbr";
+}
+
+HChar *
+s390_irgen_DEB(UChar r1, IRTemp op2addr)
+{
+ /* DEB: BFP divide, short format, 2nd operand in storage.
+ CC is left unchanged (no thunk update). */
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, load(Ity_F32, mkexpr(op2addr)));
+ assign(result, triop(Iop_DivF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "deb";
+}
+
+HChar *
+s390_irgen_DDB(UChar r1, IRTemp op2addr)
+{
+ /* DDB: BFP divide, long format, 2nd operand in storage.
+ CC is left unchanged (no thunk update). */
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, load(Ity_F64, mkexpr(op2addr)));
+ assign(result, triop(Iop_DivF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "ddb";
+}
+
+HChar *
+s390_irgen_LTEBR(UChar r1, UChar r2)
+{
+ /* LTEBR: load and test, short BFP: copy r2 to r1 and set CC thunk
+ from the copied value. */
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(result, get_fpr_w0(r2));
+ put_fpr_w0(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+
+ return "ltebr";
+}
+
+HChar *
+s390_irgen_LTDBR(UChar r1, UChar r2)
+{
+ /* LTDBR: load and test, long BFP: copy r2 to r1 and set CC thunk
+ from the copied value. */
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, get_fpr_dw0(r2));
+ put_fpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+
+ return "ltdbr";
+}
+
+HChar *
+s390_irgen_LCEBR(UChar r1, UChar r2)
+{
+ /* LCEBR: load complement, short BFP: r1 <- -r2; CC from result. */
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(result, unop(Iop_NegF32, get_fpr_w0(r2)));
+ put_fpr_w0(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+
+ return "lcebr";
+}
+
+HChar *
+s390_irgen_LCDBR(UChar r1, UChar r2)
+{
+ /* LCDBR: load complement, long BFP: r1 <- -r2; CC from result. */
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, unop(Iop_NegF64, get_fpr_dw0(r2)));
+ put_fpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+
+ return "lcdbr";
+}
+
+HChar *
+s390_irgen_LDEBR(UChar r1, UChar r2)
+{
+ /* LDEBR: lengthen short BFP (r2) to long BFP (r1). F32->F64 is
+ exact, hence no rounding mode. */
+ IRTemp op = newTemp(Ity_F32);
+
+ assign(op, get_fpr_w0(r2));
+ put_fpr_dw0(r1, unop(Iop_F32toF64, mkexpr(op)));
+
+ return "ldebr";
+}
+
+HChar *
+s390_irgen_LDEB(UChar r1, IRTemp op2addr)
+{
+ /* LDEB: lengthen short BFP from storage to long BFP in r1 (exact). */
+ IRTemp op = newTemp(Ity_F32);
+
+ assign(op, load(Ity_F32, mkexpr(op2addr)));
+ put_fpr_dw0(r1, unop(Iop_F32toF64, mkexpr(op)));
+
+ return "ldeb";
+}
+
+HChar *
+s390_irgen_LEDBR(UChar r1, UChar r2)
+{
+ /* LEDBR: round long BFP (r2) to short BFP (r1) using the current
+ rounding mode. CC is not changed. */
+ IRTemp op = newTemp(Ity_F64);
+
+ assign(op, get_fpr_dw0(r2));
+ put_fpr_w0(r1, binop(Iop_F64toF32, mkU32(Irrm_CURRENT), mkexpr(op)));
+
+ return "ledbr";
+}
+
+HChar *
+s390_irgen_MEEBR(UChar r1, UChar r2)
+{
+ /* MEEBR: BFP multiply, short format, register operands: r1 <- r1 * r2.
+ CC is left unchanged (no thunk update). */
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, get_fpr_w0(r2));
+ assign(result, triop(Iop_MulF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "meebr";
+}
+
+HChar *
+s390_irgen_MDBR(UChar r1, UChar r2)
+{
+ /* MDBR: BFP multiply, long format, register operands: r1 <- r1 * r2.
+ CC is left unchanged (no thunk update). */
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, get_fpr_dw0(r2));
+ assign(result, triop(Iop_MulF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "mdbr";
+}
+
+HChar *
+s390_irgen_MEEB(UChar r1, IRTemp op2addr)
+{
+ /* MEEB: BFP multiply, short format, 2nd operand in storage.
+ CC is left unchanged (no thunk update). */
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, load(Ity_F32, mkexpr(op2addr)));
+ assign(result, triop(Iop_MulF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "meeb";
+}
+
+HChar *
+s390_irgen_MDB(UChar r1, IRTemp op2addr)
+{
+ /* MDB: BFP multiply, long format, 2nd operand in storage.
+ CC is left unchanged (no thunk update). */
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, load(Ity_F64, mkexpr(op2addr)));
+ assign(result, triop(Iop_MulF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "mdb";
+}
+
+HChar *
+s390_irgen_SEBR(UChar r1, UChar r2)
+{
+ /* SEBR: BFP subtract, short format, register operands: r1 <- r1 - r2;
+ CC thunk set from the result. */
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, get_fpr_w0(r2));
+ assign(result, triop(Iop_SubF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "sebr";
+}
+
+HChar *
+s390_irgen_SDBR(UChar r1, UChar r2)
+{
+ /* SDBR: BFP subtract, long format, register operands: r1 <- r1 - r2;
+ CC thunk set from the result. */
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, get_fpr_dw0(r2));
+ assign(result, triop(Iop_SubF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "sdbr";
+}
+
+HChar *
+s390_irgen_SEB(UChar r1, IRTemp op2addr)
+{
+ /* SEB: BFP subtract, short format, 2nd operand in storage;
+ CC thunk set from the result. */
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, load(Ity_F32, mkexpr(op2addr)));
+ assign(result, triop(Iop_SubF32, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "seb";
+}
+
+HChar *
+s390_irgen_SDB(UChar r1, IRTemp op2addr)
+{
+ /* SDB: BFP subtract, long format, 2nd operand in storage;
+ CC thunk set from the result. */
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, load(Ity_F64, mkexpr(op2addr)));
+ assign(result, triop(Iop_SubF64, mkU32(Irrm_CURRENT), mkexpr(op1), mkexpr(op2)));
+ s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "sdb";
+}
+
+
+
+#define OFFB_TISTART offsetof(VexGuestS390XState, guest_TISTART)
+#define OFFB_TILEN offsetof(VexGuestS390XState, guest_TILEN)
+/* Return the guest state offset of word #0 of the counter register. */
+static __inline__ UInt
+counter_w0_offset(void)
+{
+ return counter_offset() + 0;
+}
+
+/* Return the guest state offset of word #1 (the low word, 4 bytes in)
+ of the counter register. */
+static __inline__ UInt
+counter_w1_offset(void)
+{
+ return counter_offset() + 4;
+}
+
+/* Write word #0 of the counter to the guest state. EXPR must be Ity_I32. */
+static __inline__ void
+put_counter_w0(IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+ stmt(IRStmt_Put(counter_w0_offset(), expr));
+}
+
+/* Read word #0 of the counter register as an Ity_I32 expression. */
+static __inline__ IRExpr *
+get_counter_w0(void)
+{
+ return IRExpr_Get(counter_w0_offset(), Ity_I32);
+}
+
+/* Write word #1 of the counter to the guest state. EXPR must be Ity_I32. */
+static __inline__ void
+put_counter_w1(IRExpr *expr)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+ stmt(IRStmt_Put(counter_w1_offset(), expr));
+}
+
+/* Read word #1 of the counter register as an Ity_I32 expression. */
+static __inline__ IRExpr *
+get_counter_w1(void)
+{
+ return IRExpr_Get(counter_w1_offset(), Ity_I32);
+}
+
+
+
+HChar *
+s390_irgen_CLC(UChar length, IRTemp start1, IRTemp start2)
+{
+ /* CLC: compare logical (SS format), one byte per IRSB pass.
+ LENGTH is the instruction's L field, i.e. bytes-to-compare minus 1;
+ the loop re-enters the instruction (guest_IA_curr_instr) until the
+ counter held in the guest-state counter register reaches LENGTH, so
+ LENGTH+1 bytes are compared in total. */
+ IRTemp current1 = newTemp(Ity_I8);
+ IRTemp current2 = newTemp(Ity_I8);
+ IRTemp counter = newTemp(Ity_I64);
+
+ assign(counter, get_counter_dw0());
+ put_counter_dw0(mkU64(0));
+
+ assign(current1, load(Ity_I8, binop(Iop_Add64, mkexpr(start1),
+ mkexpr(counter))));
+ assign(current2, load(Ity_I8, binop(Iop_Add64, mkexpr(start2),
+ mkexpr(counter))));
+ s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, current1, current2,
+ False);
+
+ /* Both fields differ ? Then the CC thunk above already has the answer. */
+ if_condition_goto(binop(Iop_CmpNE8, mkexpr(current1), mkexpr(current2)),
+ guest_IA_next_instr);
+
+ /* Check for end of field */
+ put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+ if_condition_goto(binop(Iop_CmpNE64, mkexpr(counter), mkU64(length)),
+ guest_IA_curr_instr);
+ put_counter_dw0(mkU64(0));
+
+ return "clc";
+}
+
+HChar *
+s390_irgen_CLCLE(UChar r1, UChar r3, IRTemp pad2)
+{
+ /* CLCLE: compare logical long extended. r1/r1+1 and r3/r3+1 hold
+ address/length pairs; PAD2 supplies the padding byte used once a
+ field is exhausted. One byte is processed per pass; the instruction
+ re-enters itself until done or until CC 3 is reported. */
+ IRTemp addr1, addr3, addr1_load, addr3_load, len1, len3, single1, single3;
+
+ addr1 = newTemp(Ity_I64);
+ addr3 = newTemp(Ity_I64);
+ addr1_load = newTemp(Ity_I64);
+ addr3_load = newTemp(Ity_I64);
+ len1 = newTemp(Ity_I64);
+ len3 = newTemp(Ity_I64);
+ single1 = newTemp(Ity_I8);
+ single3 = newTemp(Ity_I8);
+
+ assign(addr1, get_gpr_dw0(r1));
+ assign(len1, get_gpr_dw0(r1 + 1));
+ assign(addr3, get_gpr_dw0(r3));
+ assign(len3, get_gpr_dw0(r3 + 1));
+
+ /* len1 == 0 and len3 == 0? Exit with CC 0 (operands equal). */
+ s390_cc_set(0);
+ if_condition_goto(binop(Iop_CmpEQ64,binop(Iop_Or64, mkexpr(len1),
+ mkexpr(len3)), mkU64(0)),
+ guest_IA_next_instr);
+
+ /* A mux requires both ways to be possible. This is a way to prevent clcle
+ from reading from addr1 if it should read from the pad. Since the pad
+ has no address, just read from the instruction, we discard that anyway */
+ assign(addr1_load,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0))),
+ mkexpr(addr1),
+ mkU64(guest_IA_curr_instr)));
+
+ /* same for addr3 */
+ assign(addr3_load,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0))),
+ mkexpr(addr3),
+ mkU64(guest_IA_curr_instr)));
+
+ /* single1/single3: the byte actually compared -- from memory while the
+ field has bytes left, otherwise the low byte of the pad. */
+ assign(single1,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0))),
+ load(Ity_I8, mkexpr(addr1_load)),
+ unop(Iop_64to8, mkexpr(pad2))));
+
+ assign(single3,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0))),
+ load(Ity_I8, mkexpr(addr3_load)),
+ unop(Iop_64to8, mkexpr(pad2))));
+
+ s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, single1, single3, False);
+ /* Both fields differ ? */
+ if_condition_goto(binop(Iop_CmpNE8, mkexpr(single1), mkexpr(single3)),
+ guest_IA_next_instr);
+
+ /* If a length is 0 we must not change this length and the address */
+ put_gpr_dw0(r1,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0))),
+ binop(Iop_Add64, mkexpr(addr1), mkU64(1)),
+ mkexpr(addr1)));
+
+ put_gpr_dw0(r1 + 1,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0))),
+ binop(Iop_Sub64, mkexpr(len1), mkU64(1)),
+ mkU64(0)));
+
+ put_gpr_dw0(r3,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0))),
+ binop(Iop_Add64, mkexpr(addr3), mkU64(1)),
+ mkexpr(addr3)));
+
+ put_gpr_dw0(r3 + 1,
+ IRExpr_Mux0X(unop(Iop_1Uto8,
+ binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0))),
+ binop(Iop_Sub64, mkexpr(len3), mkU64(1)),
+ mkU64(0)));
+
+ /* The architecture requires that we exit with CC3 after a machine specific
+ amount of bytes. We do that if len1+len3 % 4096 == 0 */
+ s390_cc_set(3);
+ if_condition_goto(binop(Iop_CmpEQ64,
+ binop(Iop_And64,
+ binop(Iop_Add64, mkexpr(len1), mkexpr(len3)),
+ mkU64(0xfff)),
+ mkU64(0)),
+ guest_IA_next_instr);
+
+ always_goto(mkU64(guest_IA_curr_instr));
+
+ return "clcle";
+}
+static void
+s390_irgen_XC_EX(IRTemp length, IRTemp start1, IRTemp start2)
+{
+ /* XC executed via EX: byte-wise XOR of [start2] into [start1], with
+ LENGTH only known at runtime (IRTemp, value = byte count - 1).
+ Counter word #1 accumulates the OR of all result bytes, which feeds
+ the S390_CC_OP_BITWISE thunk at the end. Mirrors s390_irgen_XONC's
+ Iop_Xor8 special case: when start1 == start2 the bytes would XOR to
+ zero, so store a literal 0 to keep memcheck from flagging the read. */
+ IRTemp old1 = newTemp(Ity_I8);
+ IRTemp old2 = newTemp(Ity_I8);
+ IRTemp new1 = newTemp(Ity_I8);
+ IRTemp counter = newTemp(Ity_I32);
+ IRTemp addr1 = newTemp(Ity_I64);
+
+ assign(counter, get_counter_w0());
+
+ assign(addr1, binop(Iop_Add64, mkexpr(start1),
+ unop(Iop_32Uto64, mkexpr(counter))));
+
+ assign(old1, load(Ity_I8, mkexpr(addr1)));
+ assign(old2, load(Ity_I8, binop(Iop_Add64, mkexpr(start2),
+ unop(Iop_32Uto64,mkexpr(counter)))));
+ assign(new1, binop(Iop_Xor8, mkexpr(old1), mkexpr(old2)));
+
+ store(mkexpr(addr1),
+ IRExpr_Mux0X(unop(Iop_1Uto8, binop(Iop_CmpEQ64, mkexpr(start1),
+ mkexpr(start2))),
+ mkexpr(new1), mkU8(0)));
+ put_counter_w1(binop(Iop_Or32, unop(Iop_8Uto32, mkexpr(new1)),
+ get_counter_w1()));
+
+ /* Check for end of field */
+ put_counter_w0(binop(Iop_Add32, mkexpr(counter), mkU32(1)));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(counter), mkexpr(length)),
+ guest_IA_curr_instr);
+ s390_cc_thunk_put1(S390_CC_OP_BITWISE, mktemp(Ity_I32, get_counter_w1()),
+ False);
+ put_counter_dw0(mkU64(0));
+}
+
+
+static void
+s390_irgen_CLC_EX(IRTemp length, IRTemp start1, IRTemp start2)
+{
+ /* CLC executed via EX: identical to s390_irgen_CLC except LENGTH is a
+ runtime value (IRTemp) instead of an immediate L field. */
+ IRTemp current1 = newTemp(Ity_I8);
+ IRTemp current2 = newTemp(Ity_I8);
+ IRTemp counter = newTemp(Ity_I64);
+
+ assign(counter, get_counter_dw0());
+ put_counter_dw0(mkU64(0));
+
+ assign(current1, load(Ity_I8, binop(Iop_Add64, mkexpr(start1),
+ mkexpr(counter))));
+ assign(current2, load(Ity_I8, binop(Iop_Add64, mkexpr(start2),
+ mkexpr(counter))));
+ s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, current1, current2,
+ False);
+
+ /* Both fields differ ? */
+ if_condition_goto(binop(Iop_CmpNE8, mkexpr(current1), mkexpr(current2)),
+ guest_IA_next_instr);
+
+ /* Check for end of field */
+ put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+ if_condition_goto(binop(Iop_CmpNE64, mkexpr(counter), mkexpr(length)),
+ guest_IA_curr_instr);
+ put_counter_dw0(mkU64(0));
+}
+
+static void
+s390_irgen_MVC_EX(IRTemp length, IRTemp start1, IRTemp start2)
+{
+ /* MVC executed via EX: byte-wise copy [start2] -> [start1], LENGTH a
+ runtime value; same loop-via-counter scheme as s390_irgen_MVC. */
+ IRTemp counter = newTemp(Ity_I64);
+
+ assign(counter, get_counter_dw0());
+
+ store(binop(Iop_Add64, mkexpr(start1), mkexpr(counter)),
+ load(Ity_I8, binop(Iop_Add64, mkexpr(start2), mkexpr(counter))));
+
+ /* Check for end of field */
+ put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+ if_condition_goto(binop(Iop_CmpNE64, mkexpr(counter), mkexpr(length)),
+ guest_IA_curr_instr);
+ put_counter_dw0(mkU64(0));
+}
+
+
+
+static void
+s390_irgen_EX_SS(UChar r, IRTemp addr2,
+void (*irgen)(IRTemp length, IRTemp start1, IRTemp start2), int lensize)
+{
+ /* Translate an SS-format instruction reached through EX, using the
+ cached target in last_execute_target. Emits a guarded dirty call
+ that re-captures the target and forces retranslation (Ijk_TInval)
+ whenever the 8 bytes at ADDR2 no longer match the cached copy.
+ R is the EX r1 register whose low byte is OR'ed into the L field;
+ LENSIZE selects a 32- or 64-bit length temp for IRGEN. */
+ struct SS {
+ unsigned int op : 8;
+ unsigned int l : 8;
+ unsigned int b1 : 4;
+ unsigned int d1 : 12;
+ unsigned int b2 : 4;
+ unsigned int d2 : 12;
+ };
+ union {
+ struct SS dec;
+ unsigned long bytes;
+ } ss;
+ IRTemp cond;
+ IRDirty *d;
+ IRTemp torun;
+
+ IRTemp start1 = newTemp(Ity_I64);
+ IRTemp start2 = newTemp(Ity_I64);
+ IRTemp len = newTemp(lensize == 64 ? Ity_I64 : Ity_I32);
+ cond = newTemp(Ity_I1);
+ torun = newTemp(Ity_I64);
+
+ assign(torun, load(Ity_I64, mkexpr(addr2)));
+ /* Start with a check that the saved code is still correct */
+ assign(cond, binop(Iop_CmpNE64, mkexpr(torun), mkU64(last_execute_target)));
+ /* If not, save the new value */
+ d = unsafeIRDirty_0_N (0, "s390x_dirtyhelper_EX", &s390x_dirtyhelper_EX,
+ mkIRExprVec_1(mkexpr(torun)));
+ d->guard = mkexpr(cond);
+ stmt(IRStmt_Dirty(d));
+
+ /* and restart */
+ stmt(IRStmt_Put(OFFB_TISTART, mkU64(guest_IA_curr_instr)));
+ stmt(IRStmt_Put(OFFB_TILEN, mkU64(4)));
+ stmt(IRStmt_Exit(mkexpr(cond), Ijk_TInval, IRConst_U64(guest_IA_curr_instr)));
+
+ /* Decode the cached SS instruction: effective addresses are
+ d1(b1)/d2(b2), with base register 0 meaning "no base". */
+ ss.bytes = last_execute_target;
+ assign(start1, binop(Iop_Add64, mkU64(ss.dec.d1), ss.dec.b1 != 0 ? get_gpr_dw0(ss.dec.b1) : mkU64(0)));
+ assign(start2, binop(Iop_Add64, mkU64(ss.dec.d2), ss.dec.b2 != 0 ? get_gpr_dw0(ss.dec.b2) : mkU64(0)));
+ assign(len, unop(lensize == 64 ? Iop_8Uto64 : Iop_8Uto32, binop(Iop_Or8, r != 0 ? get_gpr_b7(r): mkU8(0), mkU8(ss.dec.l))));
+ irgen(len, start1, start2);
+ last_execute_target = 0;
+}
+
+HChar *
+s390_irgen_EX(UChar r1, IRTemp addr2)
+{
+ /* EX: execute the instruction at ADDR2 with its second byte OR'ed with
+ the low byte of r1. Dispatch on the opcode byte of the cached target
+ (last_execute_target): MVC/CLC/XC get dedicated runtime-length irgen
+ routines; anything else is translated inline behind a self-check that
+ forces retranslation when target bytes or r1's low byte change. */
+ switch(last_execute_target & 0xff00000000000000ULL) {
+ case 0:
+ {
+ /* no code information yet */
+ IRDirty *d;
+
+ /* so save the code... */
+ d = unsafeIRDirty_0_N (0, "s390x_dirtyhelper_EX", &s390x_dirtyhelper_EX,
+ mkIRExprVec_1(load(Ity_I64, mkexpr(addr2))));
+ stmt(IRStmt_Dirty(d));
+ /* and restart */
+ stmt(IRStmt_Put(OFFB_TISTART, mkU64(guest_IA_curr_instr)));
+ stmt(IRStmt_Put(OFFB_TILEN, mkU64(4)));
+ stmt(IRStmt_Exit(IRExpr_Const(IRConst_U1(True)), Ijk_TInval, IRConst_U64(guest_IA_curr_instr)));
+ /* we know that this will be invalidated */
+ irsb->next = mkU64(guest_IA_next_instr);
+ s390_dis_res->whatNext = Dis_StopHere;
+ break;
+ }
+
+ case 0xd200000000000000ULL:
+ /* special case MVC */
+ s390_irgen_EX_SS(r1, addr2, s390_irgen_MVC_EX, 64);
+ return "mvc via ex";
+
+ case 0xd500000000000000ULL:
+ /* special case CLC */
+ s390_irgen_EX_SS(r1, addr2, s390_irgen_CLC_EX, 64);
+ return "clc via ex";
+
+ case 0xd700000000000000ULL:
+ /* special case XC */
+ s390_irgen_EX_SS(r1, addr2, s390_irgen_XC_EX, 32);
+ return "xc via ex";
+
+
+ default:
+ {
+ /* everything else will get a self checking prefix that also checks the
+ register content */
+ IRDirty *d;
+ UChar *bytes;
+ IRTemp cond;
+ IRTemp orperand;
+ IRTemp torun;
+
+ cond = newTemp(Ity_I1);
+ orperand = newTemp(Ity_I64);
+ torun = newTemp(Ity_I64);
+
+ if (r1 == 0)
+ assign(orperand, mkU64(0));
+ else
+ assign(orperand, unop(Iop_8Uto64,get_gpr_b7(r1)));
+ /* This code is going to be translated */
+ assign(torun, binop(Iop_Or64, load(Ity_I64, mkexpr(addr2)), binop(Iop_Shl64, mkexpr(orperand), mkU8(48))));
+
+ /* Start with a check that saved code is still correct */
+ assign(cond, binop(Iop_CmpNE64, mkexpr(torun), mkU64(last_execute_target)));
+ /* If not, save the new value */
+ d = unsafeIRDirty_0_N (0, "s390x_dirtyhelper_EX", &s390x_dirtyhelper_EX,
+ mkIRExprVec_1(mkexpr(torun)));
+ d->guard = mkexpr(cond);
+ stmt(IRStmt_Dirty(d));
+
+ /* and restart */
+ stmt(IRStmt_Put(OFFB_TISTART, mkU64(guest_IA_curr_instr)));
+ stmt(IRStmt_Put(OFFB_TILEN, mkU64(4)));
+ stmt(IRStmt_Exit(mkexpr(cond), Ijk_TInval, IRConst_U64(guest_IA_curr_instr)));
+
+ /* Now comes the actual translation. The length expression maps the
+ top two bits of the first opcode byte to insn length 2/4/6. */
+ bytes = (UChar *) &last_execute_target;
+ s390_decode_and_irgen(bytes, ((((bytes[0] >> 6) + 1) >> 1) + 1) << 1, s390_dis_res);
+ if (unlikely(vex_traceflags & VEX_TRACE_FE))
+ vex_printf(" which was executed by\n");
+ /* don't make useless translations in the next execute */
+ last_execute_target = 0;
+ }
+ }
+ return "ex";
+}
+
+HChar *
+s390_irgen_EXRL(UChar r1, UInt offset)
+{
+ /* EXRL: execute relative long. The target address is IA + 2*offset
+ (halfword offset); since it is known at translation time we can
+ pre-seed last_execute_target and delegate to s390_irgen_EX. */
+ IRTemp addr = newTemp(Ity_I64);
+ /* we might save one round trip because we know the target */
+ if (!last_execute_target)
+ last_execute_target = *(ULong *)(HWord) (guest_IA_curr_instr + offset * 2UL);
+ assign(addr, mkU64(guest_IA_curr_instr + offset * 2UL));
+ s390_irgen_EX(r1, addr);
+ return "exrl";
+}
+
+HChar *
+s390_irgen_IPM(UChar r1)
+{
+ /* IPM: insert program mask -- bits 0-1 of byte 4 get the CC, bits 2-5
+ the program mask. SPM is unsupported, so the mask is assumed 0. */
+ // As long as we don't support SPM, let's just assume 0 as program mask
+ put_gpr_b4(r1, unop(Iop_32to8, binop(Iop_Or32, mkU32(0 /* program mask */),
+ binop(Iop_Shl32, s390_call_calculate_cc(), mkU8(4)))));
+
+ return "ipm";
+}
+
+
+HChar *
+s390_irgen_SRST(UChar r1, UChar r2)
+{
+ /* SRST: search string. Scan from the address in r2 toward the end
+ address in r1 for the delimiter byte in gpr0's low byte. One byte per
+ pass; after 256 bytes the instruction yields with CC 3 so the guest
+ can be interrupted (CPU-determined completion point). */
+ IRTemp address = newTemp(Ity_I64);
+ IRTemp next = newTemp(Ity_I64);
+ IRTemp delim = newTemp(Ity_I8);
+ IRTemp counter = newTemp(Ity_I64);
+ IRTemp byte = newTemp(Ity_I8);
+
+ assign(address, get_gpr_dw0(r2));
+ assign(next, get_gpr_dw0(r1));
+
+ assign(counter, get_counter_dw0());
+ put_counter_dw0(mkU64(0));
+
+ // start = next? CC=2 and out r1 and r2 unchanged
+ s390_cc_set(2);
+ put_gpr_dw0(r2, binop(Iop_Sub64, mkexpr(address), mkexpr(counter)));
+ if_condition_goto(binop(Iop_CmpEQ64, mkexpr(address), mkexpr(next)),
+ guest_IA_next_instr);
+
+ assign(byte, load(Ity_I8, mkexpr(address)));
+ assign(delim, get_gpr_b7(0));
+
+ // byte = delim? CC=1, R1=address
+ s390_cc_set(1);
+ put_gpr_dw0(r1, mkexpr(address));
+ if_condition_goto(binop(Iop_CmpEQ8, mkexpr(delim), mkexpr(byte)),
+ guest_IA_next_instr);
+
+ // else: all equal, no end yet, loop
+ put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+ put_gpr_dw0(r1, mkexpr(next));
+ put_gpr_dw0(r2, binop(Iop_Add64, mkexpr(address), mkU64(1)));
+ stmt(IRStmt_Exit(binop(Iop_CmpNE64, mkexpr(counter), mkU64(255)),
+ Ijk_Boring, IRConst_U64(guest_IA_curr_instr)));
+ // >= 256 bytes done CC=3
+ s390_cc_set(3);
+ put_counter_dw0(mkU64(0));
+
+ return "srst";
+}
+
+HChar *
+s390_irgen_CLST(UChar r1, UChar r2)
+{
+ /* CLST: compare logical string. Compare bytes at r1/r2 until the
+ terminator byte (low byte of gpr0) or an inequality is found. One
+ byte per pass; yields with CC 3 after 256 bytes (CPU-determined
+ completion point). */
+ IRTemp address1 = newTemp(Ity_I64);
+ IRTemp address2 = newTemp(Ity_I64);
+ IRTemp end = newTemp(Ity_I8);
+ IRTemp counter = newTemp(Ity_I64);
+ IRTemp byte1 = newTemp(Ity_I8);
+ IRTemp byte2 = newTemp(Ity_I8);
+
+ assign(address1, get_gpr_dw0(r1));
+ assign(address2, get_gpr_dw0(r2));
+ assign(end, get_gpr_b7(0));
+ assign(counter, get_counter_dw0());
+ put_counter_dw0(mkU64(0));
+ assign(byte1, load(Ity_I8, mkexpr(address1)));
+ assign(byte2, load(Ity_I8, mkexpr(address2)));
+
+ // end in both? all equal, reset r1 and r2 to start values
+ s390_cc_set(0);
+ put_gpr_dw0(r1, binop(Iop_Sub64, mkexpr(address1), mkexpr(counter)));
+ put_gpr_dw0(r2, binop(Iop_Sub64, mkexpr(address2), mkexpr(counter)));
+ /* (byte1 ^ end) | (byte2 ^ end) == 0 iff both bytes equal the
+ terminator. */
+ if_condition_goto(binop(Iop_CmpEQ8, mkU8(0),
+ binop(Iop_Or8,
+ binop(Iop_Xor8, mkexpr(byte1), mkexpr(end)),
+ binop(Iop_Xor8, mkexpr(byte2), mkexpr(end)))),
+ guest_IA_next_instr);
+
+ put_gpr_dw0(r1, mkexpr(address1));
+ put_gpr_dw0(r2, mkexpr(address2));
+
+ // End found in string1
+ s390_cc_set(1);
+ if_condition_goto(binop(Iop_CmpEQ8, mkexpr(end), mkexpr(byte1)),
+ guest_IA_next_instr);
+
+ // End found in string2
+ s390_cc_set(2);
+ if_condition_goto(binop(Iop_CmpEQ8, mkexpr(end), mkexpr(byte2)),
+ guest_IA_next_instr);
+
+ // string1 < string2
+ s390_cc_set(1);
+ if_condition_goto(binop(Iop_CmpLT32U, unop(Iop_8Uto32, mkexpr(byte1)),
+ unop(Iop_8Uto32, mkexpr(byte2))),
+ guest_IA_next_instr);
+
+ // string2 < string1
+ s390_cc_set(2);
+ if_condition_goto(binop(Iop_CmpLT32U, unop(Iop_8Uto32, mkexpr(byte2)),
+ unop(Iop_8Uto32, mkexpr(byte1))),
+ guest_IA_next_instr);
+
+ // else: all equal, no end yet, loop
+ put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+ put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), mkU64(1)));
+ put_gpr_dw0(r2, binop(Iop_Add64, get_gpr_dw0(r2), mkU64(1)));
+ stmt(IRStmt_Exit(binop(Iop_CmpNE64, mkexpr(counter), mkU64(255)),
+ Ijk_Boring, IRConst_U64(guest_IA_curr_instr)));
+ // >= 256 bytes done CC=3
+ s390_cc_set(3);
+ put_counter_dw0(mkU64(0));
+
+ return "clst";
+}
+
+static void
+s390_irgen_load_multiple_32bit(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* Common body of LM/LMY: load 32-bit words from OP2ADDR into the low
+ words of gprs r1..r3 (inclusive, wrapping 15 -> 0 via "reg %= 16"). */
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ put_gpr_w1(reg, load(Ity_I32, mkexpr(addr)));
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+ reg++;
+ } while (reg != (r3 + 1));
+}
+
+HChar *
+s390_irgen_LM(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* LM: load multiple (32-bit), RS format. */
+ s390_irgen_load_multiple_32bit(r1, r3, op2addr);
+
+ return "lm";
+}
+
+HChar *
+s390_irgen_LMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* LMY: load multiple (32-bit), RSY format (long displacement). */
+ s390_irgen_load_multiple_32bit(r1, r3, op2addr);
+
+ return "lmy";
+}
+
+HChar *
+s390_irgen_LMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* LMH: load multiple high -- like LM but targets the *high* words
+ (put_gpr_w0) of gprs r1..r3, wrapping 15 -> 0. */
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ put_gpr_w0(reg, load(Ity_I32, mkexpr(addr)));
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+ reg++;
+ } while (reg != (r3 + 1));
+
+ return "lmh";
+}
+
+HChar *
+s390_irgen_LMG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* LMG: load multiple (64-bit) -- full doublewords, 8-byte stride,
+ registers r1..r3 wrapping 15 -> 0. */
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ put_gpr_dw0(reg, load(Ity_I64, mkexpr(addr)));
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(8)));
+ reg++;
+ } while (reg != (r3 + 1));
+
+ return "lmg";
+}
+
+static void
+s390_irgen_store_multiple_32bit(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* Common body of STM/STMY: store the low words of gprs r1..r3
+ (wrapping 15 -> 0) to consecutive words at OP2ADDR. */
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ store(mkexpr(addr), get_gpr_w1(reg));
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+ reg++;
+ } while( reg != (r3 + 1));
+}
+
+HChar *
+s390_irgen_STM(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* STM: store multiple (32-bit), RS format. */
+ s390_irgen_store_multiple_32bit(r1, r3, op2addr);
+
+ return "stm";
+}
+
+HChar *
+s390_irgen_STMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* STMY: store multiple (32-bit), RSY format (long displacement). */
+ s390_irgen_store_multiple_32bit(r1, r3, op2addr);
+
+ return "stmy";
+}
+
+HChar *
+s390_irgen_STMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* STMH: store multiple high -- stores the *high* words (get_gpr_w0)
+ of gprs r1..r3, wrapping 15 -> 0. */
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ store(mkexpr(addr), get_gpr_w0(reg));
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+ reg++;
+ } while( reg != (r3 + 1));
+
+ return "stmh";
+}
+
+HChar *
+s390_irgen_STMG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ /* STMG: store multiple (64-bit) -- full doublewords, 8-byte stride,
+ registers r1..r3 wrapping 15 -> 0. */
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ store(mkexpr(addr), get_gpr_dw0(reg));
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(8)));
+ reg++;
+ } while( reg != (r3 + 1));
+
+ return "stmg";
+}
+
+static void
+s390_irgen_XONC(IROp op, UChar length, IRTemp start1, IRTemp start2)
+{
+ /* Common body of XC/NC/OC: apply byte operation OP to [start2] and
+ [start1], storing into [start1]. LENGTH is the L field (byte count
+ minus 1); one byte per pass via the guest-state counter. Counter
+ word #1 accumulates the OR of result bytes for the BITWISE CC thunk. */
+ IRTemp old1 = newTemp(Ity_I8);
+ IRTemp old2 = newTemp(Ity_I8);
+ IRTemp new1 = newTemp(Ity_I8);
+ IRTemp counter = newTemp(Ity_I32);
+ IRTemp addr1 = newTemp(Ity_I64);
+
+ assign(counter, get_counter_w0());
+
+ assign(addr1, binop(Iop_Add64, mkexpr(start1),
+ unop(Iop_32Uto64, mkexpr(counter))));
+
+ assign(old1, load(Ity_I8, mkexpr(addr1)));
+ assign(old2, load(Ity_I8, binop(Iop_Add64, mkexpr(start2),
+ unop(Iop_32Uto64,mkexpr(counter)))));
+ assign(new1, binop(op, mkexpr(old1), mkexpr(old2)));
+
+ /* Special case: xc is used to zero memory */
+ /* fixs390: we also want an instrumentation time shortcut */
+ if (op == Iop_Xor8) {
+ store(mkexpr(addr1),
+ IRExpr_Mux0X(unop(Iop_1Uto8, binop(Iop_CmpEQ64, mkexpr(start1),
+ mkexpr(start2))),
+ mkexpr(new1), mkU8(0)));
+ } else
+ store(mkexpr(addr1), mkexpr(new1));
+ put_counter_w1(binop(Iop_Or32, unop(Iop_8Uto32, mkexpr(new1)),
+ get_counter_w1()));
+
+ /* Check for end of field */
+ put_counter_w0(binop(Iop_Add32, mkexpr(counter), mkU32(1)));
+ if_condition_goto(binop(Iop_CmpNE32, mkexpr(counter), mkU32(length)),
+ guest_IA_curr_instr);
+ s390_cc_thunk_put1(S390_CC_OP_BITWISE, mktemp(Ity_I32, get_counter_w1()),
+ False);
+ put_counter_dw0(mkU64(0));
+}
+
+HChar *
+s390_irgen_XC(UChar length, IRTemp start1, IRTemp start2)
+{
+ /* XC: exclusive-OR character (SS format). */
+ s390_irgen_XONC(Iop_Xor8, length, start1, start2);
+
+ return "xc";
+}
+
+HChar *
+s390_irgen_NC(UChar length, IRTemp start1, IRTemp start2)
+{
+ /* NC: AND character (SS format). */
+ s390_irgen_XONC(Iop_And8, length, start1, start2);
+
+ return "nc";
+}
+
+HChar *
+s390_irgen_OC(UChar length, IRTemp start1, IRTemp start2)
+{
+ /* OC: OR character (SS format). */
+ s390_irgen_XONC(Iop_Or8, length, start1, start2);
+
+ return "oc";
+}
+
+
+HChar *
+s390_irgen_MVC(UChar length, IRTemp start1, IRTemp start2)
+{
+ /* MVC: move character (SS format). LENGTH is the L field (byte count
+ minus 1); one byte copied per pass via the guest-state counter, so
+ the architectural left-to-right overlap behavior is preserved. */
+ IRTemp counter = newTemp(Ity_I64);
+
+ assign(counter, get_counter_dw0());
+
+ store(binop(Iop_Add64, mkexpr(start1), mkexpr(counter)),
+ load(Ity_I8, binop(Iop_Add64, mkexpr(start2), mkexpr(counter))));
+
+ /* Check for end of field */
+ put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+ if_condition_goto(binop(Iop_CmpNE64, mkexpr(counter), mkU64(length)),
+ guest_IA_curr_instr);
+ put_counter_dw0(mkU64(0));
+
+ return "mvc";
+}
+
+HChar *
+s390_irgen_MVCLE(UChar r1, UChar r3, IRTemp pad2)
+{
+ /* MVCLE: move long extended. r1/r1+1 = destination address/length,
+ r3/r3+1 = source address/length, PAD2 supplies the padding byte once
+ the source is exhausted. One byte moved per pass; periodically yields
+ with CC 3 (faked via overflow add) so the guest can be interrupted. */
+ IRTemp addr1, addr3, addr3_load, len1, len3, single;
+
+ addr1 = newTemp(Ity_I64);
+ addr3 = newTemp(Ity_I64);
+ addr3_load = newTemp(Ity_I64);
+ len1 = newTemp(Ity_I64);
+ len3 = newTemp(Ity_I64);
+ single = newTemp(Ity_I8);
+
+ assign(addr1, get_gpr_dw0(r1));
+ assign(len1, get_gpr_dw0(r1 + 1));
+ assign(addr3, get_gpr_dw0(r3));
+ assign(len3, get_gpr_dw0(r3 + 1));
+
+ // len1 == 0 ? Destination full: CC from len1/len3 compare, done.
+ s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, len1, len3, False);
+ if_condition_goto(binop(Iop_CmpEQ64,mkexpr(len1), mkU64(0)),
+ guest_IA_next_instr);
+
+ /* This is a hack to prevent mvcle from reading from addr3 if it
+ should read from the pad. Since the pad has no address, just
+ read from the instruction, we discard that anyway */
+ assign(addr3_load,
+ IRExpr_Mux0X(unop(Iop_1Uto8, binop(Iop_CmpEQ64, mkexpr(len3),
+ mkU64(0))),
+ mkexpr(addr3),
+ mkU64(guest_IA_curr_instr)));
+
+ assign(single,
+ IRExpr_Mux0X(unop(Iop_1Uto8, binop(Iop_CmpEQ64, mkexpr(len3),
+ mkU64(0))),
+ load(Ity_I8, mkexpr(addr3_load)),
+ unop(Iop_64to8, mkexpr(pad2))));
+ store(mkexpr(addr1), mkexpr(single));
+
+ put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(addr1), mkU64(1)));
+
+ put_gpr_dw0(r1 + 1, binop(Iop_Sub64, mkexpr(len1), mkU64(1)));
+
+ /* Source address/length only advance while the source has bytes left. */
+ put_gpr_dw0(r3,
+ IRExpr_Mux0X(unop(Iop_1Uto8, binop(Iop_CmpEQ64, mkexpr(len3),
+ mkU64(0))),
+ binop(Iop_Add64, mkexpr(addr3), mkU64(1)),
+ mkexpr(addr3)));
+
+ put_gpr_dw0(r3 + 1,
+ IRExpr_Mux0X(unop(Iop_1Uto8, binop(Iop_CmpEQ64, mkexpr(len3),
+ mkU64(0))),
+ binop(Iop_Sub64, mkexpr(len3), mkU64(1)),
+ mkU64(0)));
+
+ /* We should set CC=3 (faked by overflow add) and leave after
+ a maximum of ~4096 bytes have been processed. This is simpler:
+ we leave whenever (len1 % 4096) == 0 */
+ s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_ADD_64, mktemp(Ity_I64, mkU64(-1ULL)),
+ mktemp(Ity_I64, mkU64(-1ULL)), False);
+ if_condition_goto(binop(Iop_CmpEQ64,
+ binop(Iop_And64, mkexpr(len1), mkU64(0xfff)),
+ mkU64(0)),
+ guest_IA_next_instr);
+
+ s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, len1, len3, False);
+ if_condition_goto(binop(Iop_CmpNE64, mkexpr(len1), mkU64(1)),
+ guest_IA_curr_instr);
+
+ return "mvcle";
+}
+
+HChar *
+s390_irgen_MVST(UChar r1, UChar r2)
+{
+ /* MVST: move string. Copy bytes from r2 to r1 up to and including the
+ terminator byte held in gpr0's low byte; on completion CC is 1 and
+ r1 points at the stored terminator. One byte per pass. */
+ IRTemp addr1 = newTemp(Ity_I64);
+ IRTemp addr2 = newTemp(Ity_I64);
+ IRTemp end = newTemp(Ity_I8);
+ IRTemp byte = newTemp(Ity_I8);
+ IRTemp counter = newTemp(Ity_I64);
+
+ assign(addr1, get_gpr_dw0(r1));
+ assign(addr2, get_gpr_dw0(r2));
+ assign(counter, get_counter_dw0());
+ assign(end, get_gpr_b7(0));
+ assign(byte, load(Ity_I8, binop(Iop_Add64, mkexpr(addr2),mkexpr(counter))));
+ store(binop(Iop_Add64,mkexpr(addr1),mkexpr(counter)), mkexpr(byte));
+
+ // We use unlimited as cpu-determined number
+ put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+ if_condition_goto(binop(Iop_CmpNE8, mkexpr(end), mkexpr(byte)),
+ guest_IA_curr_instr);
+
+ // and always set cc=1 at the end + update r1
+ s390_cc_set(1);
+ put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(addr1), mkexpr(counter)));
+ put_counter_dw0(mkU64(0));
+
+ return "mvst";
+}
+
+static void
+s390_irgen_divide_64to32(IROp op, UChar r1, IRTemp op2)
+{
+ /* Divide the 64-bit dividend formed by the even/odd pair r1 (high
+ word) / r1+1 (low word) by the 32-bit OP2 using OP (signed or
+ unsigned DivMod); remainder goes to r1, quotient to r1+1. */
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op1, binop(Iop_32HLto64,
+ get_gpr_w1(r1), // high 32 bits
+ get_gpr_w1(r1 + 1))); // low 32 bits
+ assign(result, binop(op, mkexpr(op1), mkexpr(op2)));
+ put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result))); // remainder
+ put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result))); // quotient
+}
+
+static void
+s390_irgen_divide_128to64(IROp op, UChar r1, IRTemp op2)
+{
+ /* Divide the 128-bit dividend formed by the pair r1 (high dword) /
+ r1+1 (low dword) by the 64-bit OP2; remainder -> r1, quotient -> r1+1. */
+ IRTemp op1 = newTemp(Ity_I128);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, binop(Iop_64HLto128,
+ get_gpr_dw0(r1), // high 64 bits
+ get_gpr_dw0(r1 + 1))); // low 64 bits
+ assign(result, binop(op, mkexpr(op1), mkexpr(op2)));
+ put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(result))); // remainder
+ put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(result))); // quotient
+}
+
+static void
+s390_irgen_divide_64to64(IROp op, UChar r1, IRTemp op2)
+{
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I128);
+
+ assign(op1, get_gpr_dw0(r1 + 1));
+ assign(result, binop(op, mkexpr(op1), mkexpr(op2)));
+ put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(result))); // remainder
+ put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(result))); // quotient
+}
+
+/* DR: signed 32-bit divide, divisor in register r2. */
+HChar *
+s390_irgen_DR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+
+ s390_irgen_divide_64to32(Iop_DivModS64to32, r1, op2);
+
+ return "dr";
+}
+
+/* D: signed 32-bit divide, divisor loaded from storage. */
+HChar *
+s390_irgen_D(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+
+ s390_irgen_divide_64to32(Iop_DivModS64to32, r1, op2);
+
+ return "d";
+}
+
+/* DLR (DIVIDE LOGICAL): unsigned 32-bit divide of the 64-bit value in the
+   r1/r1+1 register pair by the value in r2. */
+HChar *
+s390_irgen_DLR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+
+ s390_irgen_divide_64to32(Iop_DivModU64to32, r1, op2);
+
+ /* Return the correct mnemonic; this previously said "dr". */
+ return "dlr";
+}
+
+/* DL: unsigned 32-bit divide, divisor loaded from storage. */
+HChar *
+s390_irgen_DL(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, load(Ity_I32, mkexpr(op2addr)));
+
+ s390_irgen_divide_64to32(Iop_DivModU64to32, r1, op2);
+
+ return "dl";
+}
+
+/* DLG: unsigned 64-bit divide (128-bit dividend), divisor from storage. */
+HChar *
+s390_irgen_DLG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+
+ s390_irgen_divide_128to64(Iop_DivModU128to64, r1, op2);
+
+ return "dlg";
+}
+
+/* DLGR: unsigned 64-bit divide (128-bit dividend), divisor in r2. */
+HChar *
+s390_irgen_DLGR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+
+ s390_irgen_divide_128to64(Iop_DivModU128to64, r1, op2);
+
+ return "dlgr";
+}
+
+/* DSGR: signed 64-bit divide (64-bit dividend in r1+1), divisor in r2. */
+HChar *
+s390_irgen_DSGR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+
+ s390_irgen_divide_64to64(Iop_DivModS64to64, r1, op2);
+
+ return "dsgr";
+}
+
+/* DSG: signed 64-bit divide, divisor loaded from storage. */
+HChar *
+s390_irgen_DSG(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, load(Ity_I64, mkexpr(op2addr)));
+
+ s390_irgen_divide_64to64(Iop_DivModS64to64, r1, op2);
+
+ return "dsg";
+}
+
+/* DSGFR: signed divide; 32-bit divisor in r2 is sign-extended to 64 bits. */
+HChar *
+s390_irgen_DSGFR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+
+ s390_irgen_divide_64to64(Iop_DivModS64to64, r1, op2);
+
+ return "dsgfr";
+}
+
+/* DSGF: signed divide; 32-bit divisor from storage, sign-extended. */
+HChar *
+s390_irgen_DSGF(UChar r1, IRTemp op2addr)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+
+ s390_irgen_divide_64to64(Iop_DivModS64to64, r1, op2);
+
+ return "dsgf";
+}
+
+/* Load access registers r1 .. r3 (inclusive) from consecutive words at
+   op2addr.  Register numbers wrap modulo 16 (e.g. r1=14, r3=1 loads
+   AR 14, 15, 0, 1); the "reg %= 16" at the top of the loop implements
+   the wrap, and the loop exits once reg has passed r3. */
+static void
+s390_irgen_load_ar_multiple(UChar r1, UChar r3, IRTemp op2addr)
+{
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ put_ar_w0(reg, load(Ity_I32, mkexpr(addr)));
+ /* Chain a fresh temp for the next address; IR temps are
+    single-assignment. */
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+ reg++;
+ } while (reg != (r3 + 1));
+}
+
+/* LAM: LOAD ACCESS MULTIPLE. */
+HChar *
+s390_irgen_LAM(UChar r1, UChar r3, IRTemp op2addr)
+{
+ s390_irgen_load_ar_multiple(r1, r3, op2addr);
+
+ return "lam";
+}
+
+/* LAMY: LOAD ACCESS MULTIPLE (long-displacement form). */
+HChar *
+s390_irgen_LAMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+ s390_irgen_load_ar_multiple(r1, r3, op2addr);
+
+ return "lamy";
+}
+
+/* Store access registers r1 .. r3 (inclusive, wrapping modulo 16) to
+   consecutive words at op2addr.  Mirror image of
+   s390_irgen_load_ar_multiple above. */
+static void
+s390_irgen_store_ar_multiple(UChar r1, UChar r3, IRTemp op2addr)
+{
+ UChar reg;
+ IRTemp addr = newTemp(Ity_I64);
+
+ assign(addr, mkexpr(op2addr));
+ reg = r1;
+ do {
+ IRTemp old = addr;
+
+ reg %= 16;
+ store(mkexpr(addr), get_ar_w0(reg));
+ addr = newTemp(Ity_I64);
+ assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+ reg++;
+ } while (reg != (r3 + 1));
+}
+
+/* STAM: STORE ACCESS MULTIPLE. */
+HChar *
+s390_irgen_STAM(UChar r1, UChar r3, IRTemp op2addr)
+{
+ s390_irgen_store_ar_multiple(r1, r3, op2addr);
+
+ return "stam";
+}
+
+/* STAMY: STORE ACCESS MULTIPLE (long-displacement form). */
+HChar *
+s390_irgen_STAMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+ s390_irgen_store_ar_multiple(r1, r3, op2addr);
+
+ return "stamy";
+}
+
+
+/* Implementation for 32-bit compare-and-swap (CS / CSY). */
+static void
+s390_irgen_cas_32(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRCAS *cas;
+ IRTemp op1 = newTemp(Ity_I32);
+ IRTemp old_mem = newTemp(Ity_I32);
+ IRTemp op3 = newTemp(Ity_I32);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp nequal = newTemp(Ity_I1);
+
+ assign(op1, get_gpr_w1(r1));
+ assign(op3, get_gpr_w1(r3));
+
+ /* The first and second operands are compared. If they are equal,
+ the third operand is stored at the second-operand location. */
+ cas = mkIRCAS(IRTemp_INVALID, old_mem,
+ Iend_BE, mkexpr(op2addr),
+ NULL, mkexpr(op1), /* expected value */
+ NULL, mkexpr(op3) /* new value */);
+ stmt(IRStmt_CAS(cas));
+
+ /* Set CC. Operands compared equal -> 0, else 1. */
+ assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(old_mem)));
+ s390_cc_thunk_put1(S390_CC_OP_BITWISE, result, False);
+
+ /* If operands were equal (cc == 0) just store the old value op1 in r1.
+ Otherwise, store the old_value from memory in r1 and yield. */
+ /* The Ijk_Yield on failure lets other threads run, so that a guest
+    spinning on CS can eventually succeed. */
+ assign(nequal, binop(Iop_CmpNE32, s390_call_calculate_cc(), mkU32(0)));
+ put_gpr_w1(r1, mkite(mkexpr(nequal), mkexpr(old_mem), mkexpr(op1)));
+ stmt(IRStmt_Exit(mkexpr(nequal), Ijk_Yield, IRConst_U64(guest_IA_next_instr)));
+}
+
+/* CS: COMPARE AND SWAP (32-bit). */
+HChar *
+s390_irgen_CS(UChar r1, UChar r3, IRTemp op2addr)
+{
+ s390_irgen_cas_32(r1, r3, op2addr);
+
+ return "cs";
+}
+
+/* CSY: COMPARE AND SWAP (long-displacement form). */
+HChar *
+s390_irgen_CSY(UChar r1, UChar r3, IRTemp op2addr)
+{
+ s390_irgen_cas_32(r1, r3, op2addr);
+
+ return "csy";
+}
+
+/* CSG: 64-bit COMPARE AND SWAP.  Same structure as s390_irgen_cas_32,
+   but operating on doublewords. */
+HChar *
+s390_irgen_CSG(UChar r1, UChar r3, IRTemp op2addr)
+{
+ IRCAS *cas;
+ IRTemp op1 = newTemp(Ity_I64);
+ IRTemp old_mem = newTemp(Ity_I64);
+ IRTemp op3 = newTemp(Ity_I64);
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp nequal = newTemp(Ity_I1);
+
+ assign(op1, get_gpr_dw0(r1));
+ assign(op3, get_gpr_dw0(r3));
+
+ /* The first and second operands are compared. If they are equal,
+ the third operand is stored at the second-operand location. */
+ cas = mkIRCAS(IRTemp_INVALID, old_mem,
+ Iend_BE, mkexpr(op2addr),
+ NULL, mkexpr(op1), /* expected value */
+ NULL, mkexpr(op3) /* new value */);
+ stmt(IRStmt_CAS(cas));
+
+ /* Set CC. Operands compared equal -> 0, else 1. */
+ assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(old_mem)));
+ s390_cc_thunk_put1(S390_CC_OP_BITWISE, result, False);
+
+ /* If operands were equal (cc == 0) just store the old value op1 in r1.
+ Otherwise, store the old_value from memory in r1 and yield. */
+ assign(nequal, binop(Iop_CmpNE32, s390_call_calculate_cc(), mkU32(0)));
+ put_gpr_dw0(r1, mkite(mkexpr(nequal), mkexpr(old_mem), mkexpr(op1)));
+ stmt(IRStmt_Exit(mkexpr(nequal), Ijk_Yield, IRConst_U64(guest_IA_next_instr)));
+
+ return "csg";
+}
+
+
+/* Binary floating point */
+
+/* AXBR: ADD (extended BFP).  r1 += r2 on 128-bit FP register pairs;
+   cc is set from the 128-bit result. */
+HChar *
+s390_irgen_AXBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F128);
+ IRTemp op2 = newTemp(Ity_F128);
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(op1, get_fpr_pair(r1));
+ assign(op2, get_fpr_pair(r2));
+ assign(result, triop(Iop_AddF128, mkU32(Irrm_CURRENT), mkexpr(op1),
+ mkexpr(op2)));
+ put_fpr_pair(r1, mkexpr(result));
+
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, result);
+
+ return "axbr";
+}
+
+/* The result of a Iop_CmdFxx operation is a condition code. It is
+ encoded using the values defined in type IRCmpFxxResult.
+ Before we can store the condition code into the guest state (or do
+ anything else with it for that matter) we need to convert it to
+ the encoding that s390 uses. This is what this function does.
+
+ s390 VEX b6 b2 b0 cc.1 cc.0
+ 0 0x40 EQ 1 0 0 0 0
+ 1 0x01 LT 0 0 1 0 1
+ 2 0x00 GT 0 0 0 1 0
+ 3 0x45 Unordered 1 1 1 1 1
+
+ The following bits from the VEX encoding are interesting:
+ b0, b2, b6 with b0 being the LSB. We observe:
+
+ cc.0 = b0;
+ cc.1 = b2 | (~b0 & ~b6)
+
+ with cc being the s390 condition code.
+*/
+static IRExpr *
+convert_vex_fpcc_to_s390(IRTemp vex_cc)
+{
+ IRTemp cc0 = newTemp(Ity_I32);
+ IRTemp cc1 = newTemp(Ity_I32);
+ IRTemp b0 = newTemp(Ity_I32);
+ IRTemp b2 = newTemp(Ity_I32);
+ IRTemp b6 = newTemp(Ity_I32);
+
+ /* Extract bits 0, 2 and 6 of the VEX condition code. */
+ assign(b0, binop(Iop_And32, mkexpr(vex_cc), mkU32(1)));
+ assign(b2, binop(Iop_And32, binop(Iop_Shr32, mkexpr(vex_cc), mkU8(2)),
+ mkU32(1)));
+ assign(b6, binop(Iop_And32, binop(Iop_Shr32, mkexpr(vex_cc), mkU8(6)),
+ mkU32(1)));
+
+ /* Each of b0/b2/b6 is 0 or 1, so "1 - b" is the 1-bit complement. */
+ assign(cc0, mkexpr(b0));
+ assign(cc1, binop(Iop_Or32, mkexpr(b2),
+ binop(Iop_And32,
+ binop(Iop_Sub32, mkU32(1), mkexpr(b0)), /* ~b0 */
+ binop(Iop_Sub32, mkU32(1), mkexpr(b6)) /* ~b6 */
+ )));
+
+ return binop(Iop_Or32, mkexpr(cc0), binop(Iop_Shl32, mkexpr(cc1), mkU8(1)));
+}
+
+/* CEBR: COMPARE (short BFP), register/register.  The VEX comparison result
+   is translated to an s390 cc and deposited via the S390_CC_OP_SET thunk. */
+HChar *
+s390_irgen_CEBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp cc_vex = newTemp(Ity_I32);
+ IRTemp cc_s390 = newTemp(Ity_I32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, get_fpr_w0(r2));
+ assign(cc_vex, binop(Iop_CmpF32, mkexpr(op1), mkexpr(op2)));
+
+ assign(cc_s390, convert_vex_fpcc_to_s390(cc_vex));
+ s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+ return "cebr";
+}
+
+/* CDBR: COMPARE (long BFP), register/register. */
+HChar *
+s390_irgen_CDBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp cc_vex = newTemp(Ity_I32);
+ IRTemp cc_s390 = newTemp(Ity_I32);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, get_fpr_dw0(r2));
+ assign(cc_vex, binop(Iop_CmpF64, mkexpr(op1), mkexpr(op2)));
+
+ assign(cc_s390, convert_vex_fpcc_to_s390(cc_vex));
+ s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+ return "cdbr";
+}
+
+/* CXBR: COMPARE (extended BFP), register-pair/register-pair. */
+HChar *
+s390_irgen_CXBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F128);
+ IRTemp op2 = newTemp(Ity_F128);
+ IRTemp cc_vex = newTemp(Ity_I32);
+ IRTemp cc_s390 = newTemp(Ity_I32);
+
+ assign(op1, get_fpr_pair(r1));
+ assign(op2, get_fpr_pair(r2));
+ assign(cc_vex, binop(Iop_CmpF128, mkexpr(op1), mkexpr(op2)));
+
+ assign(cc_s390, convert_vex_fpcc_to_s390(cc_vex));
+ s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+ return "cxbr";
+}
+
+/* CEB: COMPARE (short BFP), second operand from storage. */
+HChar *
+s390_irgen_CEB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_F32);
+ IRTemp op2 = newTemp(Ity_F32);
+ IRTemp cc_vex = newTemp(Ity_I32);
+ IRTemp cc_s390 = newTemp(Ity_I32);
+
+ assign(op1, get_fpr_w0(r1));
+ assign(op2, load(Ity_F32, mkexpr(op2addr)));
+ assign(cc_vex, binop(Iop_CmpF32, mkexpr(op1), mkexpr(op2)));
+
+ assign(cc_s390, convert_vex_fpcc_to_s390(cc_vex));
+ s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+ return "ceb";
+}
+
+/* CDB: COMPARE (long BFP), second operand from storage. */
+HChar *
+s390_irgen_CDB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op1 = newTemp(Ity_F64);
+ IRTemp op2 = newTemp(Ity_F64);
+ IRTemp cc_vex = newTemp(Ity_I32);
+ IRTemp cc_s390 = newTemp(Ity_I32);
+
+ assign(op1, get_fpr_dw0(r1));
+ assign(op2, load(Ity_F64, mkexpr(op2addr)));
+ assign(cc_vex, binop(Iop_CmpF64, mkexpr(op1), mkexpr(op2)));
+
+ assign(cc_s390, convert_vex_fpcc_to_s390(cc_vex));
+ s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+ return "cdb";
+}
+
+/* CXFBR: CONVERT FROM FIXED (32-bit int -> extended BFP).  Exact; no
+   rounding needed, hence no rounding mode argument. */
+HChar *
+s390_irgen_CXFBR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ put_fpr_pair(r1, unop(Iop_I32StoF128, mkexpr(op2)));
+
+ return "cxfbr";
+}
+
+/* CXGBR: CONVERT FROM FIXED (64-bit int -> extended BFP). */
+HChar *
+s390_irgen_CXGBR(UChar r1, UChar r2)
+{
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ put_fpr_pair(r1, unop(Iop_I64StoF128, mkexpr(op2)));
+
+ return "cxgbr";
+}
+
+/* CFXBR: CONVERT TO FIXED (extended BFP -> 32-bit int); r3 is the
+   rounding-mode mask.  cc is derived from the source operand. */
+HChar *
+s390_irgen_CFXBR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op = newTemp(Ity_F128);
+ IRTemp result = newTemp(Ity_I32);
+
+ assign(op, get_fpr_pair(r2));
+ assign(result, binop(Iop_F128toI32S, mkU32(encode_rounding_mode(r3)),
+ mkexpr(op)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_128_TO_INT_32, op);
+
+ return "cfxbr";
+}
+
+/* CGXBR: CONVERT TO FIXED (extended BFP -> 64-bit int). */
+HChar *
+s390_irgen_CGXBR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp op = newTemp(Ity_F128);
+ IRTemp result = newTemp(Ity_I64);
+
+ assign(op, get_fpr_pair(r2));
+ assign(result, binop(Iop_F128toI64S, mkU32(encode_rounding_mode(r3)),
+ mkexpr(op)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_128_TO_INT_64, op);
+
+ return "cgxbr";
+}
+
+/* DXBR: DIVIDE (extended BFP).  r1 /= r2 on 128-bit FP register pairs.
+   No cc thunk update here — BFP divide leaves the condition code
+   unchanged. */
+HChar *
+s390_irgen_DXBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F128);
+ IRTemp op2 = newTemp(Ity_F128);
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(op1, get_fpr_pair(r1));
+ assign(op2, get_fpr_pair(r2));
+ assign(result, triop(Iop_DivF128, mkU32(Irrm_CURRENT), mkexpr(op1),
+ mkexpr(op2)));
+ put_fpr_pair(r1, mkexpr(result));
+
+ return "dxbr";
+}
+
+/* LTXBR: LOAD AND TEST (extended BFP) — copy r2 to r1 and set cc from
+   the value. */
+HChar *
+s390_irgen_LTXBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(result, get_fpr_pair(r2));
+ put_fpr_pair(r1, mkexpr(result));
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, result);
+
+ return "ltxbr";
+}
+
+/* LCXBR: LOAD COMPLEMENT (extended BFP) — negate r2 into r1; set cc. */
+HChar *
+s390_irgen_LCXBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(result, unop(Iop_NegF128, get_fpr_pair(r2)));
+ put_fpr_pair(r1, mkexpr(result));
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, result);
+
+ return "lcxbr";
+}
+
+/* LXDBR: LOAD LENGTHENED, long BFP -> extended BFP (exact widening). */
+HChar *
+s390_irgen_LXDBR(UChar r1, UChar r2)
+{
+ IRTemp op = newTemp(Ity_F64);
+
+ assign(op, get_fpr_dw0(r2));
+ put_fpr_pair(r1, unop(Iop_F64toF128, mkexpr(op)));
+
+ return "lxdbr";
+}
+
+/* LXEBR: LOAD LENGTHENED, short BFP -> extended BFP. */
+HChar *
+s390_irgen_LXEBR(UChar r1, UChar r2)
+{
+ IRTemp op = newTemp(Ity_F32);
+
+ assign(op, get_fpr_w0(r2));
+ put_fpr_pair(r1, unop(Iop_F32toF128, mkexpr(op)));
+
+ return "lxebr";
+}
+
+/* LXDB: LOAD LENGTHENED, long BFP from storage -> extended BFP. */
+HChar *
+s390_irgen_LXDB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op = newTemp(Ity_F64);
+
+ assign(op, load(Ity_F64, mkexpr(op2addr)));
+ put_fpr_pair(r1, unop(Iop_F64toF128, mkexpr(op)));
+
+ return "lxdb";
+}
+
+/* LXEB: LOAD LENGTHENED, short BFP from storage -> extended BFP. */
+HChar *
+s390_irgen_LXEB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op = newTemp(Ity_F32);
+
+ assign(op, load(Ity_F32, mkexpr(op2addr)));
+ put_fpr_pair(r1, unop(Iop_F32toF128, mkexpr(op)));
+
+ return "lxeb";
+}
+
+/* LNEBR: LOAD NEGATIVE (short BFP) — r1 = -|r2|; set cc from result. */
+HChar *
+s390_irgen_LNEBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(result, unop(Iop_NegF32, unop(Iop_AbsF32, get_fpr_w0(r2))));
+ put_fpr_w0(r1, mkexpr(result));
+ s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_32, result);
+
+ return "lnebr";
+}
+
+/* LNDBR: LOAD NEGATIVE (long BFP). */
+HChar *
+s390_irgen_LNDBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, unop(Iop_NegF64, unop(Iop_AbsF64, get_fpr_dw0(r2))));
+ put_fpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_64, result);
+
+ return "lndbr";
+}
+
+/* LNXBR: LOAD NEGATIVE (extended BFP). */
+HChar *
+s390_irgen_LNXBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(result, unop(Iop_NegF128, unop(Iop_AbsF128, get_fpr_pair(r2))));
+ put_fpr_pair(r1, mkexpr(result));
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, result);
+
+ return "lnxbr";
+}
+
+/* LPEBR: LOAD POSITIVE (short BFP) — r1 = |r2|; set cc from result. */
+HChar *
+s390_irgen_LPEBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(result, unop(Iop_AbsF32, get_fpr_w0(r2)));
+ put_fpr_w0(r1, mkexpr(result));
+ s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_32, result);
+
+ return "lpebr";
+}
+
+/* LPDBR: LOAD POSITIVE (long BFP). */
+HChar *
+s390_irgen_LPDBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, unop(Iop_AbsF64, get_fpr_dw0(r2)));
+ put_fpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_64, result);
+
+ return "lpdbr";
+}
+
+/* LPXBR: LOAD POSITIVE (extended BFP). */
+HChar *
+s390_irgen_LPXBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(result, unop(Iop_AbsF128, get_fpr_pair(r2)));
+ put_fpr_pair(r1, mkexpr(result));
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, result);
+
+ return "lpxbr";
+}
+
+/* LDXBR: LOAD ROUNDED, extended BFP -> long BFP (rounds per current FPC
+   rounding mode). */
+HChar *
+s390_irgen_LDXBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, binop(Iop_F128toF64, mkU32(Irrm_CURRENT), get_fpr_pair(r2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "ldxbr";
+}
+
+/* LEXBR: LOAD ROUNDED, extended BFP -> short BFP. */
+HChar *
+s390_irgen_LEXBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(result, binop(Iop_F128toF32, mkU32(Irrm_CURRENT), get_fpr_pair(r2)));
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "lexbr";
+}
+
+/* MXBR: MULTIPLY (extended BFP).  r1 *= r2; cc unchanged. */
+HChar *
+s390_irgen_MXBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F128);
+ IRTemp op2 = newTemp(Ity_F128);
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(op1, get_fpr_pair(r1));
+ assign(op2, get_fpr_pair(r2));
+ assign(result, triop(Iop_MulF128, mkU32(Irrm_CURRENT), mkexpr(op1),
+ mkexpr(op2)));
+ put_fpr_pair(r1, mkexpr(result));
+
+ return "mxbr";
+}
+
+/* MAEBR: MULTIPLY AND ADD (short BFP): r1 = r3 * r2 + r1.
+   Iop_MAddF32 computes arg2 * arg3 + arg4 (after the rounding mode), so
+   the product operands must be r3 and r2 and the addend r1.  The previous
+   operand order (r1, r2, r3) computed r1 * r2 + r3 instead. */
+HChar *
+s390_irgen_MAEBR(UChar r1, UChar r3, UChar r2)
+{
+ put_fpr_w0(r1, qop(Iop_MAddF32, mkU32(Irrm_CURRENT),
+ get_fpr_w0(r3), get_fpr_w0(r2), get_fpr_w0(r1)));
+
+ return "maebr";
+}
+
+/* MADBR: MULTIPLY AND ADD (long BFP): r1 = r3 * r2 + r1.
+   Iop_MAddF64 computes arg2 * arg3 + arg4; pass (r3, r2, r1) so the
+   addend is r1 — the previous (r1, r2, r3) order was transposed. */
+HChar *
+s390_irgen_MADBR(UChar r1, UChar r3, UChar r2)
+{
+ put_fpr_dw0(r1, qop(Iop_MAddF64, mkU32(Irrm_CURRENT),
+ get_fpr_dw0(r3), get_fpr_dw0(r2), get_fpr_dw0(r1)));
+
+ return "madbr";
+}
+
+/* MAEB: MULTIPLY AND ADD (short BFP), storage operand: r1 = r3 * mem + r1.
+   Iop_MAddF32 computes arg2 * arg3 + arg4; the addend must be r1, not r3
+   (previously the operands were transposed). */
+HChar *
+s390_irgen_MAEB(UChar r3, IRTemp op2addr, UChar r1)
+{
+ IRExpr *op2 = load(Ity_F32, mkexpr(op2addr));
+
+ put_fpr_w0(r1, qop(Iop_MAddF32, mkU32(Irrm_CURRENT),
+ get_fpr_w0(r3), op2, get_fpr_w0(r1)));
+
+ return "maeb";
+}
+
+/* MADB: MULTIPLY AND ADD (long BFP), storage operand: r1 = r3 * mem + r1.
+   Iop_MAddF64 computes arg2 * arg3 + arg4; the addend must be r1. */
+HChar *
+s390_irgen_MADB(UChar r3, IRTemp op2addr, UChar r1)
+{
+ IRExpr *op2 = load(Ity_F64, mkexpr(op2addr));
+
+ put_fpr_dw0(r1, qop(Iop_MAddF64, mkU32(Irrm_CURRENT),
+ get_fpr_dw0(r3), op2, get_fpr_dw0(r1)));
+
+ return "madb";
+}
+
+/* MSEBR: MULTIPLY AND SUBTRACT (short BFP): r1 = r3 * r2 - r1.
+   Iop_MSubF32 computes arg2 * arg3 - arg4; pass (r3, r2, r1) so r1 is the
+   subtrahend — the previous (r1, r2, r3) order computed r1 * r2 - r3. */
+HChar *
+s390_irgen_MSEBR(UChar r1, UChar r3, UChar r2)
+{
+ put_fpr_w0(r1, qop(Iop_MSubF32, mkU32(Irrm_CURRENT),
+ get_fpr_w0(r3), get_fpr_w0(r2), get_fpr_w0(r1)));
+
+ return "msebr";
+}
+
+/* MSDBR: MULTIPLY AND SUBTRACT (long BFP): r1 = r3 * r2 - r1.
+   Iop_MSubF64 computes arg2 * arg3 - arg4; r1 must be the subtrahend. */
+HChar *
+s390_irgen_MSDBR(UChar r1, UChar r3, UChar r2)
+{
+ put_fpr_dw0(r1, qop(Iop_MSubF64, mkU32(Irrm_CURRENT),
+ get_fpr_dw0(r3), get_fpr_dw0(r2), get_fpr_dw0(r1)));
+
+ return "msdbr";
+}
+
+/* MSEB: MULTIPLY AND SUBTRACT (short BFP), storage operand:
+   r1 = r3 * mem - r1.  Iop_MSubF32 computes arg2 * arg3 - arg4. */
+HChar *
+s390_irgen_MSEB(UChar r3, IRTemp op2addr, UChar r1)
+{
+ IRExpr *op2 = load(Ity_F32, mkexpr(op2addr));
+
+ put_fpr_w0(r1, qop(Iop_MSubF32, mkU32(Irrm_CURRENT),
+ get_fpr_w0(r3), op2, get_fpr_w0(r1)));
+
+ return "mseb";
+}
+
+/* MSDB: MULTIPLY AND SUBTRACT (long BFP), storage operand:
+   r1 = r3 * mem - r1.  Iop_MSubF64 computes arg2 * arg3 - arg4. */
+HChar *
+s390_irgen_MSDB(UChar r3, IRTemp op2addr, UChar r1)
+{
+ IRExpr *op2 = load(Ity_F64, mkexpr(op2addr));
+
+ put_fpr_dw0(r1, qop(Iop_MSubF64, mkU32(Irrm_CURRENT),
+ get_fpr_dw0(r3), op2, get_fpr_dw0(r1)));
+
+ return "msdb";
+}
+
+/* SQEBR: SQUARE ROOT (short BFP), register operand. */
+HChar *
+s390_irgen_SQEBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F32);
+
+ assign(result, binop(Iop_SqrtF32, mkU32(Irrm_CURRENT), get_fpr_w0(r2)));
+ put_fpr_w0(r1, mkexpr(result));
+
+ return "sqebr";
+}
+
+/* SQDBR: SQUARE ROOT (long BFP), register operand. */
+HChar *
+s390_irgen_SQDBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, binop(Iop_SqrtF64, mkU32(Irrm_CURRENT), get_fpr_dw0(r2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "sqdbr";
+}
+
+/* SQXBR: SQUARE ROOT (extended BFP), register-pair operand. */
+HChar *
+s390_irgen_SQXBR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(result, binop(Iop_SqrtF128, mkU32(Irrm_CURRENT), get_fpr_pair(r2)));
+ put_fpr_pair(r1, mkexpr(result));
+
+ return "sqxbr";
+}
+
+/* SQEB: SQUARE ROOT (short BFP), operand from storage. */
+HChar *
+s390_irgen_SQEB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op = newTemp(Ity_F32);
+
+ assign(op, load(Ity_F32, mkexpr(op2addr)));
+ put_fpr_w0(r1, binop(Iop_SqrtF32, mkU32(Irrm_CURRENT), mkexpr(op)));
+
+ return "sqeb";
+}
+
+/* SQDB: SQUARE ROOT (long BFP), operand from storage. */
+HChar *
+s390_irgen_SQDB(UChar r1, IRTemp op2addr)
+{
+ IRTemp op = newTemp(Ity_F64);
+
+ assign(op, load(Ity_F64, mkexpr(op2addr)));
+ put_fpr_dw0(r1, binop(Iop_SqrtF64, mkU32(Irrm_CURRENT), mkexpr(op)));
+
+ return "sqdb";
+}
+
+/* SXBR: SUBTRACT (extended BFP).  r1 -= r2 on 128-bit FP register pairs;
+   cc is set from the result. */
+HChar *
+s390_irgen_SXBR(UChar r1, UChar r2)
+{
+ IRTemp op1 = newTemp(Ity_F128);
+ IRTemp op2 = newTemp(Ity_F128);
+ IRTemp result = newTemp(Ity_F128);
+
+ assign(op1, get_fpr_pair(r1));
+ assign(op2, get_fpr_pair(r2));
+ assign(result, triop(Iop_SubF128, mkU32(Irrm_CURRENT), mkexpr(op1),
+ mkexpr(op2)));
+ put_fpr_pair(r1, mkexpr(result));
+ s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, result);
+
+ return "sxbr";
+}
+
+/* TCEB: TEST DATA CLASS (short BFP).  The class mask is the second-operand
+   address itself (op2addr), not a value loaded from storage; the cc thunk
+   evaluates the data class test lazily. */
+HChar *
+s390_irgen_TCEB(UChar r1, IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_F32);
+
+ assign(value, get_fpr_w0(r1));
+
+ s390_cc_thunk_putFZ(S390_CC_OP_BFP_TDC_32, value, op2addr);
+
+ return "tceb";
+}
+
+/* TCDB: TEST DATA CLASS (long BFP). */
+HChar *
+s390_irgen_TCDB(UChar r1, IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_F64);
+
+ assign(value, get_fpr_dw0(r1));
+
+ s390_cc_thunk_putFZ(S390_CC_OP_BFP_TDC_64, value, op2addr);
+
+ return "tcdb";
+}
+
+/* TCXB: TEST DATA CLASS (extended BFP). */
+HChar *
+s390_irgen_TCXB(UChar r1, IRTemp op2addr)
+{
+ IRTemp value = newTemp(Ity_F128);
+
+ assign(value, get_fpr_pair(r1));
+
+ s390_cc_thunk_put1f128Z(S390_CC_OP_BFP_TDC_128, value, op2addr);
+
+ return "tcxb";
+}
+
+/* LCDFR: LOAD COMPLEMENT (long) — sign-bit flip only; cc unchanged. */
+HChar *
+s390_irgen_LCDFR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, unop(Iop_NegF64, get_fpr_dw0(r2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "lcdfr";
+}
+
+/* LNDFR: LOAD NEGATIVE (long) — force the sign bit on; cc unchanged. */
+HChar *
+s390_irgen_LNDFR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, unop(Iop_NegF64, unop(Iop_AbsF64, get_fpr_dw0(r2))));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "lndfr";
+}
+
+/* LPDFR: LOAD POSITIVE (long) — clear the sign bit; cc unchanged. */
+HChar *
+s390_irgen_LPDFR(UChar r1, UChar r2)
+{
+ IRTemp result = newTemp(Ity_F64);
+
+ assign(result, unop(Iop_AbsF64, get_fpr_dw0(r2)));
+ put_fpr_dw0(r1, mkexpr(result));
+
+ return "lpdfr";
+}
+
+/* LDGR: move GPR r2 bit pattern into FPR r1 (no conversion). */
+HChar *
+s390_irgen_LDGR(UChar r1, UChar r2)
+{
+ put_fpr_dw0(r1, unop(Iop_ReinterpI64asF64, get_gpr_dw0(r2)));
+
+ return "ldgr";
+}
+
+/* LGDR: move FPR r2 bit pattern into GPR r1 (no conversion). */
+HChar *
+s390_irgen_LGDR(UChar r1, UChar r2)
+{
+ put_gpr_dw0(r1, unop(Iop_ReinterpF64asI64, get_fpr_dw0(r2)));
+
+ return "lgdr";
+}
+
+
+/* CPSDR: COPY SIGN.  r1 receives the magnitude of r2 combined with the
+   sign bit of r3.  Done on the raw 64-bit patterns: isolate r3's sign
+   bit, mask off r2's, and OR them back together. */
+HChar *
+s390_irgen_CPSDR(UChar r3, UChar r1, UChar r2)
+{
+ IRTemp sign = newTemp(Ity_I64);
+ IRTemp value = newTemp(Ity_I64);
+
+ assign(sign, binop(Iop_And64, unop(Iop_ReinterpF64asI64, get_fpr_dw0(r3)),
+ mkU64(1ULL << 63)));
+ assign(value, binop(Iop_And64, unop(Iop_ReinterpF64asI64, get_fpr_dw0(r2)),
+ mkU64((1ULL << 63) - 1)));
+ put_fpr_dw0(r1, unop(Iop_ReinterpI64asF64, binop(Iop_Or64, mkexpr(value),
+ mkexpr(sign))));
+
+ return "cpsdr";
+}
+
+
+/* Clean helper for CVB: let the host CPU do the packed-decimal to binary
+   conversion.  On non-s390x hosts (this file is compiled everywhere for
+   the build system's benefit) it returns 0 and is never called. */
+static UInt
+s390_do_cvb(ULong decimal)
+{
+#if defined(VGA_s390x)
+ UInt binary;
+
+ __asm__ volatile (
+ "cvb %[result],%[input]\n\t"
+ : [result] "=d"(binary)
+ : [input] "m"(decimal)
+ );
+
+ return binary;
+#else
+ return 0;
+#endif
+}
+
+/* Build an IR clean-call to s390_do_cvb for the given 64-bit packed
+   decimal expression. */
+static IRExpr *
+s390_call_cvb(IRExpr *in)
+{
+ IRExpr **args, *call;
+
+ args = mkIRExprVec_1(in);
+ call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+ "s390_do_cvb", &s390_do_cvb, args);
+
+ /* Nothing is excluded from definedness checking. */
+ call->Iex.CCall.cee->mcx_mask = 0;
+
+ return call;
+}
+
+/* CVB: CONVERT TO BINARY — packed decimal at op2addr to binary in r1. */
+HChar *
+s390_irgen_CVB(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, s390_call_cvb(load(Ity_I64, mkexpr(op2addr))));
+
+ return "cvb";
+}
+
+/* CVBY: CONVERT TO BINARY (long-displacement form). */
+HChar *
+s390_irgen_CVBY(UChar r1, IRTemp op2addr)
+{
+ put_gpr_w1(r1, s390_call_cvb(load(Ity_I64, mkexpr(op2addr))));
+
+ return "cvby";
+}
+
+
+/* Clean helper for CVD: let the host CPU convert the low 32 bits of
+   binary_in to packed decimal.  Returns 0 on non-s390x hosts (never
+   called there). */
+static ULong
+s390_do_cvd(ULong binary_in)
+{
+#if defined(VGA_s390x)
+ UInt binary = binary_in & 0xffffffffULL;
+ ULong decimal;
+
+ __asm__ volatile (
+ "cvd %[input],%[result]\n\t"
+ : [result] "=m"(decimal)
+ : [input] "d"(binary)
+ );
+
+ return decimal;
+#else
+ return 0;
+#endif
+}
+
+/* Build an IR clean-call to s390_do_cvd for the given binary value. */
+static IRExpr *
+s390_call_cvd(IRExpr *in)
+{
+ IRExpr **args, *call;
+
+ args = mkIRExprVec_1(in);
+ call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+ "s390_do_cvd", &s390_do_cvd, args);
+
+ /* Nothing is excluded from definedness checking. */
+ call->Iex.CCall.cee->mcx_mask = 0;
+
+ return call;
+}
+
+/* CVD: CONVERT TO DECIMAL — binary in r1 to packed decimal at op2addr. */
+HChar *
+s390_irgen_CVD(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), s390_call_cvd(get_gpr_w1(r1)));
+
+ return "cvd";
+}
+
+/* CVDY: CONVERT TO DECIMAL (long-displacement form). */
+HChar *
+s390_irgen_CVDY(UChar r1, IRTemp op2addr)
+{
+ store(mkexpr(op2addr), s390_call_cvd(get_gpr_w1(r1)));
+
+ return "cvdy";
+}
+
+
+/* FLOGR: FIND LEFTMOST ONE.  r1 gets the bit position of the leftmost '1'
+   in r2 (64 if r2 is zero); r1+1 gets r2 with that bit cleared.
+   cc: 0 if r2 == 0, 2 otherwise (via unsigned compare against 0). */
+HChar *
+s390_irgen_FLOGR(UChar r1, UChar r2)
+{
+ IRTemp input = newTemp(Ity_I64);
+ IRTemp not_zero = newTemp(Ity_I64);
+ IRTemp tmpnum = newTemp(Ity_I64);
+ IRTemp num = newTemp(Ity_I64);
+ IRTemp shift_amount = newTemp(Ity_I8);
+
+ /* We use the "count leading zeroes" operator because the number of
+ leading zeroes is identical with the bit position of the first '1' bit.
+ However, that operator does not work when the input value is zero.
+ Therefore, we set the LSB of the input value to 1 and use Clz64 on
+ the modified value. If input == 0, then the result is 64. Otherwise,
+ the result of Clz64 is what we want. */
+
+ assign(input, get_gpr_dw0(r2));
+ assign(not_zero, binop(Iop_Or64, mkexpr(input), mkU64(1)));
+ assign(tmpnum, unop(Iop_Clz64, mkexpr(not_zero)));
+
+ /* num = (input == 0) ? 64 : tmpnum */
+ assign(num, mkite(binop(Iop_CmpEQ64, mkexpr(input), mkU64(0)),
+ /* == 0 */ mkU64(64),
+ /* != 0 */ mkexpr(tmpnum)));
+
+ put_gpr_dw0(r1, mkexpr(num));
+
+ /* Set the leftmost '1' bit of the input value to zero. Shift it out to the
+ left followed by a logical shift right. Note, that the Iop_Shl64 and
+ Iop_Shr64 semantics will preserve the value-to-be-shifted if the
+ shift-amount equals or is larger than the width of value-to-be-shifted
+ (see ir_opt.c). So instead of shifting to the left once we shift twice
+ to avoid this special case. */
+
+ assign(shift_amount, unop(Iop_64to8, binop(Iop_Add64, mkexpr(num),
+ mkU64(1))));
+
+ put_gpr_dw0(r1 + 1, binop(Iop_Shr64,
+ binop(Iop_Shl64,
+ binop(Iop_Shl64, mkexpr(input),
+ unop(Iop_64to8, mkexpr(num))),
+ mkU8(1)),
+ mkexpr(shift_amount)));
+
+ /* Compare the original value as an unsigned integer with 0. */
+ s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, input,
+ mktemp(Ity_I64, mkU64(0)), False);
+
+ return "flogr";
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Build IR for special instructions ---*/
+/*------------------------------------------------------------*/
+
+/* Valgrind client request: end the block with an Ijk_ClientReq jump to the
+   instruction following the special-op sequence.  By convention %r2 holds
+   the request args and %r3 receives the result. */
+void
+s390_irgen_client_request(void)
+{
+ if (0)
+ vex_printf("%%R3 = client_request ( %%R2 )\n");
+
+ irsb->next = mkU64((ULong)(guest_IA_curr_instr
+ + S390_SPECIAL_OP_PREAMBLE_SIZE
+ + S390_SPECIAL_OP_SIZE));
+ irsb->jumpkind = Ijk_ClientReq;
+
+ s390_dis_res->whatNext = Dis_StopHere;
+}
+
+/* Special op: load the guest's non-redirected address into %r3. */
+void
+s390_irgen_guest_NRADDR(void)
+{
+ if (0)
+ vex_printf("%%R3 = guest_NRADDR\n");
+
+ put_gpr_dw0(3, IRExpr_Get(S390_GUEST_OFFSET(guest_NRADDR), Ity_I64));
+}
+
+/* Special op: call the address in %r1 without redirection; the return
+   address (the instruction after the special-op sequence) goes into the
+   link register %r14. */
+void
+s390_irgen_noredir(void)
+{
+ /* Continue after special op */
+ put_gpr_dw0(14, mkU64(guest_IA_curr_instr
+ + S390_SPECIAL_OP_PREAMBLE_SIZE
+ + S390_SPECIAL_OP_SIZE));
+
+ /* The address is in REG1, all parameters are in the right (guest) places */
+ irsb->next = get_gpr_dw0(1);
+ irsb->jumpkind = Ijk_NoRedir;
+
+ s390_dis_res->whatNext = Dis_StopHere;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_irgen.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/guest_s390_priv.h
+++ valgrind/VEX/priv/guest_s390_priv.h
@@ -0,0 +1,661 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_priv.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_GUEST_S390_PRIV_H
+#define __VEX_GUEST_S390_PRIV_H
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h" // IRSB (needed by bb_to_IR.h)
+#include "libvex.h" // VexArch (needed by bb_to_IR.h)
+#include "guest_generic_bb_to_IR.h" // DisResult
+
+UInt s390_decode_and_irgen(UChar *, UInt, DisResult *);
+
+#define S390_GUEST_OFFSET(x) offsetof(VexGuestS390XState, x)
+
+/*------------------------------------------------------------*/
+/*--- Dirty Helper functions. ---*/
+/*------------------------------------------------------------*/
+void s390x_dirtyhelper_EX(ULong torun);
+
+/*------------------------------------------------------------*/
+/*--- IR generators for special opcodes. ---*/
+/*------------------------------------------------------------*/
+void s390_irgen_client_request(void);
+void s390_irgen_guest_NRADDR(void);
+void s390_irgen_noredir(void);
+void s390_irgen_internal_return(void);
+
+/* Size of special instruction preamble */
+#define S390_SPECIAL_OP_PREAMBLE_SIZE 8
+
+/* Size of special instructions */
+#define S390_SPECIAL_OP_SIZE 2
+
+
+/*------------------------------------------------------------*/
+/*--- IR generators for regular opcodes. ---*/
+/*------------------------------------------------------------*/
+
+void s390_format_I(HChar *(*irgen)(UChar), UChar);
+void s390_format_RI(HChar *(*irgen)(UChar, UShort), UChar, UShort);
+void s390_format_RI_RU(HChar *(*irgen)(UChar, UShort), UChar, UShort);
+void s390_format_RI_RI(HChar *(*irgen)(UChar, UShort), UChar, UShort);
+void s390_format_RI_RP(HChar *(*irgen)(UChar, UShort), UChar, UShort);
+void s390_format_RIE_RRP(HChar *(*irgen)(UChar, UChar, UShort), UChar, UChar, UShort);
+void s390_format_RIE_RRI0(HChar *(*irgen)(UChar, UChar, UShort), UChar, UChar, UShort);
+void s390_format_RIE_RRUUU(HChar *(*irgen)(UChar, UChar, UChar, UChar, UChar), UChar, UChar, UChar, UChar, UChar);
+void s390_format_RIE_RRPU(HChar *(*irgen)(UChar, UChar, UShort, UChar), UChar, UChar, UShort, UChar);
+void s390_format_RIE_RUPU(HChar *(*irgen)(UChar, UChar, UShort, UChar), UChar, UChar, UShort, UChar);
+void s390_format_RIE_RUPI(HChar *(*irgen)(UChar, UChar, UShort, UChar), UChar, UChar, UShort, UChar);
+void s390_format_RIL(HChar *(*irgen)(UChar, UInt), UChar, UInt);
+void s390_format_RIL_RU(HChar *(*irgen)(UChar, UInt), UChar, UInt);
+void s390_format_RIL_RI(HChar *(*irgen)(UChar, UInt), UChar, UInt);
+void s390_format_RIL_RP(HChar *(*irgen)(UChar, UInt), UChar, UInt);
+void s390_format_RIL_UP(HChar *(*irgen)(void), UChar, UInt);
+void s390_format_RIS_RURDI(HChar *(*irgen)(UChar, UChar, UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RIS_RURDU(HChar *(*irgen)(UChar, UChar, UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RR(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RR_RR(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RR_FF(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RRE(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RRE_RR(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RRE_FF(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RRE_RF(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RRE_FR(HChar *(*irgen)(UChar, UChar), UChar, UChar);
+void s390_format_RRE_R0(HChar *(*irgen)(UChar), UChar);
+void s390_format_RRE_F0(HChar *(*irgen)(UChar), UChar);
+void s390_format_RRF_F0FF(HChar *(*irgen)(UChar, UChar, UChar), UChar, UChar, UChar);
+void s390_format_RRF_U0RF(HChar *(*irgen)(UChar, UChar, UChar), UChar, UChar, UChar);
+void s390_format_RRF_F0FF2(HChar *(*irgen)(UChar, UChar, UChar), UChar, UChar, UChar);
+void s390_format_RRF_R0RR2(HChar *(*irgen)(UChar, UChar, UChar), UChar, UChar, UChar);
+void s390_format_RRS(HChar *(*irgen)(UChar, UChar, UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RS_R0RD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UShort);
+void s390_format_RS_RRRD(HChar *(*irgen)(UChar, UChar, IRTemp), UChar, UChar, UChar, UShort);
+void s390_format_RS_RURD(HChar *(*irgen)(UChar, UChar, IRTemp), UChar, UChar, UChar, UShort);
+void s390_format_RS_AARD(HChar *(*irgen)(UChar, UChar, IRTemp), UChar, UChar, UChar, UShort);
+void s390_format_RSI_RRP(HChar *(*irgen)(UChar, UChar, UShort), UChar, UChar, UShort);
+void s390_format_RSY_RRRD(HChar *(*irgen)(UChar, UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RSY_AARD(HChar *(*irgen)(UChar, UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RSY_RURD(HChar *(*irgen)(UChar, UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RX(HChar *(*irgen)(UChar, UChar, UChar, UShort, IRTemp), UChar, UChar, UChar, UShort);
+void s390_format_RX_RRRD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UChar, UShort);
+void s390_format_RX_FRRD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UChar, UShort);
+void s390_format_RXE_FRRD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UChar, UShort);
+void s390_format_RXF_FRRDF(HChar *(*irgen)(UChar, IRTemp, UChar), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RXY_RRRD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RXY_FRRD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UChar, UShort, UChar);
+void s390_format_RXY_URRD(HChar *(*irgen)(void), UChar, UChar, UChar, UShort, UChar);
+void s390_format_S_RD(HChar *(*irgen)(IRTemp), UChar, UShort);
+void s390_format_SI_URD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UShort);
+void s390_format_SIY_URD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UShort, UChar);
+void s390_format_SIY_IRD(HChar *(*irgen)(UChar, IRTemp), UChar, UChar, UShort, UChar);
+void s390_format_SS_L0RDRD(HChar *(*irgen)(UChar, IRTemp, IRTemp), UChar, UChar, UShort, UChar, UShort);
+void s390_format_SIL_RDI(HChar *(*irgen)(UShort, IRTemp), UChar, UShort, UShort);
+void s390_format_SIL_RDU(HChar *(*irgen)(UShort, IRTemp), UChar, UShort, UShort);
+
+HChar *s390_irgen_AR(UChar, UChar);
+HChar *s390_irgen_AGR(UChar, UChar);
+HChar *s390_irgen_AGFR(UChar, UChar);
+HChar *s390_irgen_ARK(UChar, UChar, UChar);
+HChar *s390_irgen_AGRK(UChar, UChar, UChar);
+HChar *s390_irgen_A(UChar, IRTemp);
+HChar *s390_irgen_AY(UChar, IRTemp);
+HChar *s390_irgen_AG(UChar, IRTemp);
+HChar *s390_irgen_AGF(UChar, IRTemp);
+HChar *s390_irgen_AFI(UChar, UInt);
+HChar *s390_irgen_AGFI(UChar, UInt);
+HChar *s390_irgen_AHIK(UChar, UChar, UShort);
+HChar *s390_irgen_AGHIK(UChar, UChar, UShort);
+HChar *s390_irgen_ASI(UChar, IRTemp);
+HChar *s390_irgen_AGSI(UChar, IRTemp);
+HChar *s390_irgen_AH(UChar, IRTemp);
+HChar *s390_irgen_AHY(UChar, IRTemp);
+HChar *s390_irgen_AHI(UChar, UShort);
+HChar *s390_irgen_AGHI(UChar, UShort);
+HChar *s390_irgen_AHHHR(UChar, UChar, UChar);
+HChar *s390_irgen_AHHLR(UChar, UChar, UChar);
+HChar *s390_irgen_AIH(UChar, UInt);
+HChar *s390_irgen_ALR(UChar, UChar);
+HChar *s390_irgen_ALGR(UChar, UChar);
+HChar *s390_irgen_ALGFR(UChar, UChar);
+HChar *s390_irgen_ALRK(UChar, UChar, UChar);
+HChar *s390_irgen_ALGRK(UChar, UChar, UChar);
+HChar *s390_irgen_AL(UChar, IRTemp);
+HChar *s390_irgen_ALY(UChar, IRTemp);
+HChar *s390_irgen_ALG(UChar, IRTemp);
+HChar *s390_irgen_ALGF(UChar, IRTemp);
+HChar *s390_irgen_ALFI(UChar, UInt);
+HChar *s390_irgen_ALGFI(UChar, UInt);
+HChar *s390_irgen_ALHHHR(UChar, UChar, UChar);
+HChar *s390_irgen_ALHHLR(UChar, UChar, UChar);
+HChar *s390_irgen_ALCR(UChar, UChar);
+HChar *s390_irgen_ALCGR(UChar, UChar);
+HChar *s390_irgen_ALC(UChar, IRTemp);
+HChar *s390_irgen_ALCG(UChar, IRTemp);
+HChar *s390_irgen_ALSI(UChar, IRTemp);
+HChar *s390_irgen_ALGSI(UChar, IRTemp);
+HChar *s390_irgen_ALHSIK(UChar, UChar, UShort);
+HChar *s390_irgen_ALGHSIK(UChar, UChar, UShort);
+HChar *s390_irgen_ALSIH(UChar, UInt);
+HChar *s390_irgen_ALSIHN(UChar, UInt);
+HChar *s390_irgen_NR(UChar, UChar);
+HChar *s390_irgen_NGR(UChar, UChar);
+HChar *s390_irgen_NRK(UChar, UChar, UChar);
+HChar *s390_irgen_NGRK(UChar, UChar, UChar);
+HChar *s390_irgen_N(UChar, IRTemp);
+HChar *s390_irgen_NY(UChar, IRTemp);
+HChar *s390_irgen_NG(UChar, IRTemp);
+HChar *s390_irgen_NI(UChar, IRTemp);
+HChar *s390_irgen_NIY(UChar, IRTemp);
+HChar *s390_irgen_NC(UChar, IRTemp, IRTemp);
+HChar *s390_irgen_NIHF(UChar, UInt);
+HChar *s390_irgen_NIHH(UChar, UShort);
+HChar *s390_irgen_NIHL(UChar, UShort);
+HChar *s390_irgen_NILF(UChar, UInt);
+HChar *s390_irgen_NILH(UChar, UShort);
+HChar *s390_irgen_NILL(UChar, UShort);
+HChar *s390_irgen_BASR(UChar, UChar);
+HChar *s390_irgen_BAS(UChar, IRTemp);
+HChar *s390_irgen_BCR(UChar, UChar);
+HChar *s390_irgen_BC(UChar, UChar, UChar, UShort, IRTemp);
+HChar *s390_irgen_BCTR(UChar, UChar);
+HChar *s390_irgen_BCTGR(UChar, UChar);
+HChar *s390_irgen_BCT(UChar, IRTemp);
+HChar *s390_irgen_BCTG(UChar, IRTemp);
+HChar *s390_irgen_BXH(UChar, UChar, IRTemp);
+HChar *s390_irgen_BXHG(UChar, UChar, IRTemp);
+HChar *s390_irgen_BXLE(UChar, UChar, IRTemp);
+HChar *s390_irgen_BXLEG(UChar, UChar, IRTemp);
+HChar *s390_irgen_BRAS(UChar, UShort);
+HChar *s390_irgen_BRASL(UChar, UInt);
+HChar *s390_irgen_BRC(UChar, UShort);
+HChar *s390_irgen_BRCL(UChar, UInt);
+HChar *s390_irgen_BRCT(UChar, UShort);
+HChar *s390_irgen_BRCTG(UChar, UShort);
+HChar *s390_irgen_BRXH(UChar, UChar, UShort);
+HChar *s390_irgen_BRXHG(UChar, UChar, UShort);
+HChar *s390_irgen_BRXLE(UChar, UChar, UShort);
+HChar *s390_irgen_BRXLG(UChar, UChar, UShort);
+HChar *s390_irgen_CR(UChar, UChar);
+HChar *s390_irgen_CGR(UChar, UChar);
+HChar *s390_irgen_CGFR(UChar, UChar);
+HChar *s390_irgen_C(UChar, IRTemp);
+HChar *s390_irgen_CY(UChar, IRTemp);
+HChar *s390_irgen_CG(UChar, IRTemp);
+HChar *s390_irgen_CGF(UChar, IRTemp);
+HChar *s390_irgen_CFI(UChar, UInt);
+HChar *s390_irgen_CGFI(UChar, UInt);
+HChar *s390_irgen_CRL(UChar, UInt);
+HChar *s390_irgen_CGRL(UChar, UInt);
+HChar *s390_irgen_CGFRL(UChar, UInt);
+HChar *s390_irgen_CRB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CGRB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CRJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CGRJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CIB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CGIB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CIJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CGIJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CS(UChar, UChar, IRTemp);
+HChar *s390_irgen_CSY(UChar, UChar, IRTemp);
+HChar *s390_irgen_CSG(UChar, UChar, IRTemp);
+HChar *s390_irgen_CH(UChar, IRTemp);
+HChar *s390_irgen_CHY(UChar, IRTemp);
+HChar *s390_irgen_CGH(UChar, IRTemp);
+HChar *s390_irgen_CHI(UChar, UShort);
+HChar *s390_irgen_CGHI(UChar, UShort);
+HChar *s390_irgen_CHHSI(UShort, IRTemp);
+HChar *s390_irgen_CHSI(UShort, IRTemp);
+HChar *s390_irgen_CGHSI(UShort, IRTemp);
+HChar *s390_irgen_CHRL(UChar, UInt);
+HChar *s390_irgen_CGHRL(UChar, UInt);
+HChar *s390_irgen_CHHR(UChar, UChar);
+HChar *s390_irgen_CHLR(UChar, UChar);
+HChar *s390_irgen_CHF(UChar, IRTemp);
+HChar *s390_irgen_CIH(UChar, UInt);
+HChar *s390_irgen_CLR(UChar, UChar);
+HChar *s390_irgen_CLGR(UChar, UChar);
+HChar *s390_irgen_CLGFR(UChar, UChar);
+HChar *s390_irgen_CL(UChar, IRTemp);
+HChar *s390_irgen_CLY(UChar, IRTemp);
+HChar *s390_irgen_CLG(UChar, IRTemp);
+HChar *s390_irgen_CLGF(UChar, IRTemp);
+HChar *s390_irgen_CLC(UChar, IRTemp, IRTemp);
+HChar *s390_irgen_CLFI(UChar, UInt);
+HChar *s390_irgen_CLGFI(UChar, UInt);
+HChar *s390_irgen_CLI(UChar, IRTemp);
+HChar *s390_irgen_CLIY(UChar, IRTemp);
+HChar *s390_irgen_CLFHSI(UShort, IRTemp);
+HChar *s390_irgen_CLGHSI(UShort, IRTemp);
+HChar *s390_irgen_CLHHSI(UShort, IRTemp);
+HChar *s390_irgen_CLRL(UChar, UInt);
+HChar *s390_irgen_CLGRL(UChar, UInt);
+HChar *s390_irgen_CLGFRL(UChar, UInt);
+HChar *s390_irgen_CLHRL(UChar, UInt);
+HChar *s390_irgen_CLGHRL(UChar, UInt);
+HChar *s390_irgen_CLRB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CLGRB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CLRJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CLGRJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CLIB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CLGIB(UChar, UChar, UChar, IRTemp);
+HChar *s390_irgen_CLIJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CLGIJ(UChar, UChar, UShort, UChar);
+HChar *s390_irgen_CLM(UChar, UChar, IRTemp);
+HChar *s390_irgen_CLMY(UChar, UChar, IRTemp);
+HChar *s390_irgen_CLMH(UChar, UChar, IRTemp);
+HChar *s390_irgen_CLHHR(UChar, UChar);
+HChar *s390_irgen_CLHLR(UChar, UChar);
+HChar *s390_irgen_CLHF(UChar, IRTemp);
+HChar *s390_irgen_CLIH(UChar, UInt);
+HChar *s390_irgen_CLCLE(UChar, UChar, IRTemp);
+HChar *s390_irgen_CLST(UChar, UChar);
+HChar *s390_irgen_CVB(UChar, IRTemp);
+HChar *s390_irgen_CVBY(UChar, IRTemp);
+HChar *s390_irgen_CVD(UChar, IRTemp);
+HChar *s390_irgen_CVDY(UChar, IRTemp);
+HChar *s390_irgen_CPYA(UChar, UChar);
+HChar *s390_irgen_DR(UChar, UChar);
+HChar *s390_irgen_D(UChar, IRTemp);
+HChar *s390_irgen_DLR(UChar, UChar);
+HChar *s390_irgen_DLGR(UChar, UChar);
+HChar *s390_irgen_DL(UChar, IRTemp);
+HChar *s390_irgen_DLG(UChar, IRTemp);
+HChar *s390_irgen_DSGR(UChar, UChar);
+HChar *s390_irgen_DSGFR(UChar, UChar);
+HChar *s390_irgen_DSG(UChar, IRTemp);
+HChar *s390_irgen_DSGF(UChar, IRTemp);
+HChar *s390_irgen_XR(UChar, UChar);
+HChar *s390_irgen_XGR(UChar, UChar);
+HChar *s390_irgen_XRK(UChar, UChar, UChar);
+HChar *s390_irgen_XGRK(UChar, UChar, UChar);
+HChar *s390_irgen_X(UChar, IRTemp);
+HChar *s390_irgen_XY(UChar, IRTemp);
+HChar *s390_irgen_XG(UChar, IRTemp);
+HChar *s390_irgen_XI(UChar, IRTemp);
+HChar *s390_irgen_XIY(UChar, IRTemp);
+HChar *s390_irgen_XC(UChar, IRTemp, IRTemp);
+HChar *s390_irgen_XIHF(UChar, UInt);
+HChar *s390_irgen_XILF(UChar, UInt);
+HChar *s390_irgen_EX(UChar, IRTemp);
+HChar *s390_irgen_EXRL(UChar, UInt);
+HChar *s390_irgen_EAR(UChar, UChar);
+HChar *s390_irgen_FLOGR(UChar, UChar);
+HChar *s390_irgen_IC(UChar, IRTemp);
+HChar *s390_irgen_ICY(UChar, IRTemp);
+HChar *s390_irgen_ICM(UChar, UChar, IRTemp);
+HChar *s390_irgen_ICMY(UChar, UChar, IRTemp);
+HChar *s390_irgen_ICMH(UChar, UChar, IRTemp);
+HChar *s390_irgen_IIHF(UChar, UInt);
+HChar *s390_irgen_IIHH(UChar, UShort);
+HChar *s390_irgen_IIHL(UChar, UShort);
+HChar *s390_irgen_IILF(UChar, UInt);
+HChar *s390_irgen_IILH(UChar, UShort);
+HChar *s390_irgen_IILL(UChar, UShort);
+HChar *s390_irgen_IPM(UChar);
+HChar *s390_irgen_LR(UChar, UChar);
+HChar *s390_irgen_LGR(UChar, UChar);
+HChar *s390_irgen_LGFR(UChar, UChar);
+HChar *s390_irgen_L(UChar, IRTemp);
+HChar *s390_irgen_LY(UChar, IRTemp);
+HChar *s390_irgen_LG(UChar, IRTemp);
+HChar *s390_irgen_LGF(UChar, IRTemp);
+HChar *s390_irgen_LGFI(UChar, UInt);
+HChar *s390_irgen_LRL(UChar, UInt);
+HChar *s390_irgen_LGRL(UChar, UInt);
+HChar *s390_irgen_LGFRL(UChar, UInt);
+HChar *s390_irgen_LAM(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAMY(UChar, UChar, IRTemp);
+HChar *s390_irgen_LA(UChar, IRTemp);
+HChar *s390_irgen_LAY(UChar, IRTemp);
+HChar *s390_irgen_LAE(UChar, IRTemp);
+HChar *s390_irgen_LAEY(UChar, IRTemp);
+HChar *s390_irgen_LARL(UChar, UInt);
+HChar *s390_irgen_LAA(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAAG(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAAL(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAALG(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAN(UChar, UChar, IRTemp);
+HChar *s390_irgen_LANG(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAX(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAXG(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAO(UChar, UChar, IRTemp);
+HChar *s390_irgen_LAOG(UChar, UChar, IRTemp);
+HChar *s390_irgen_LTR(UChar, UChar);
+HChar *s390_irgen_LTGR(UChar, UChar);
+HChar *s390_irgen_LTGFR(UChar, UChar);
+HChar *s390_irgen_LT(UChar, IRTemp);
+HChar *s390_irgen_LTG(UChar, IRTemp);
+HChar *s390_irgen_LTGF(UChar, IRTemp);
+HChar *s390_irgen_LBR(UChar, UChar);
+HChar *s390_irgen_LGBR(UChar, UChar);
+HChar *s390_irgen_LB(UChar, IRTemp);
+HChar *s390_irgen_LGB(UChar, IRTemp);
+HChar *s390_irgen_LBH(UChar, IRTemp);
+HChar *s390_irgen_LCR(UChar, UChar);
+HChar *s390_irgen_LCGR(UChar, UChar);
+HChar *s390_irgen_LCGFR(UChar, UChar);
+HChar *s390_irgen_LHR(UChar, UChar);
+HChar *s390_irgen_LGHR(UChar, UChar);
+HChar *s390_irgen_LH(UChar, IRTemp);
+HChar *s390_irgen_LHY(UChar, IRTemp);
+HChar *s390_irgen_LGH(UChar, IRTemp);
+HChar *s390_irgen_LHI(UChar, UShort);
+HChar *s390_irgen_LGHI(UChar, UShort);
+HChar *s390_irgen_LHRL(UChar, UInt);
+HChar *s390_irgen_LGHRL(UChar, UInt);
+HChar *s390_irgen_LHH(UChar, IRTemp);
+HChar *s390_irgen_LFH(UChar, IRTemp);
+HChar *s390_irgen_LLGFR(UChar, UChar);
+HChar *s390_irgen_LLGF(UChar, IRTemp);
+HChar *s390_irgen_LLGFRL(UChar, UInt);
+HChar *s390_irgen_LLCR(UChar, UChar);
+HChar *s390_irgen_LLGCR(UChar, UChar);
+HChar *s390_irgen_LLC(UChar, IRTemp);
+HChar *s390_irgen_LLGC(UChar, IRTemp);
+HChar *s390_irgen_LLCH(UChar, IRTemp);
+HChar *s390_irgen_LLHR(UChar, UChar);
+HChar *s390_irgen_LLGHR(UChar, UChar);
+HChar *s390_irgen_LLH(UChar, IRTemp);
+HChar *s390_irgen_LLGH(UChar, IRTemp);
+HChar *s390_irgen_LLHRL(UChar, UInt);
+HChar *s390_irgen_LLGHRL(UChar, UInt);
+HChar *s390_irgen_LLHH(UChar, IRTemp);
+HChar *s390_irgen_LLIHF(UChar, UInt);
+HChar *s390_irgen_LLIHH(UChar, UShort);
+HChar *s390_irgen_LLIHL(UChar, UShort);
+HChar *s390_irgen_LLILF(UChar, UInt);
+HChar *s390_irgen_LLILH(UChar, UShort);
+HChar *s390_irgen_LLILL(UChar, UShort);
+HChar *s390_irgen_LLGTR(UChar, UChar);
+HChar *s390_irgen_LLGT(UChar, IRTemp);
+HChar *s390_irgen_LM(UChar, UChar, IRTemp);
+HChar *s390_irgen_LMY(UChar, UChar, IRTemp);
+HChar *s390_irgen_LMG(UChar, UChar, IRTemp);
+HChar *s390_irgen_LMH(UChar, UChar, IRTemp);
+HChar *s390_irgen_LNR(UChar, UChar);
+HChar *s390_irgen_LNGR(UChar, UChar);
+HChar *s390_irgen_LNGFR(UChar, UChar);
+HChar *s390_irgen_LPQ(UChar, IRTemp);
+HChar *s390_irgen_LPR(UChar, UChar);
+HChar *s390_irgen_LPGR(UChar, UChar);
+HChar *s390_irgen_LPGFR(UChar, UChar);
+HChar *s390_irgen_LRVR(UChar, UChar);
+HChar *s390_irgen_LRVGR(UChar, UChar);
+HChar *s390_irgen_LRVH(UChar, IRTemp);
+HChar *s390_irgen_LRV(UChar, IRTemp);
+HChar *s390_irgen_LRVG(UChar, IRTemp);
+HChar *s390_irgen_MVC(UChar, IRTemp, IRTemp);
+HChar *s390_irgen_MVHHI(UShort, IRTemp);
+HChar *s390_irgen_MVHI(UShort, IRTemp);
+HChar *s390_irgen_MVGHI(UShort, IRTemp);
+HChar *s390_irgen_MVI(UChar, IRTemp);
+HChar *s390_irgen_MVIY(UChar, IRTemp);
+HChar *s390_irgen_MVCLE(UChar, UChar, IRTemp);
+HChar *s390_irgen_MVST(UChar, UChar);
+HChar *s390_irgen_MR(UChar, UChar);
+HChar *s390_irgen_M(UChar, IRTemp);
+HChar *s390_irgen_MFY(UChar, IRTemp);
+HChar *s390_irgen_MH(UChar, IRTemp);
+HChar *s390_irgen_MHY(UChar, IRTemp);
+HChar *s390_irgen_MHI(UChar, UShort);
+HChar *s390_irgen_MGHI(UChar, UShort);
+HChar *s390_irgen_MLR(UChar, UChar);
+HChar *s390_irgen_MLGR(UChar, UChar);
+HChar *s390_irgen_ML(UChar, IRTemp);
+HChar *s390_irgen_MLG(UChar, IRTemp);
+HChar *s390_irgen_MSR(UChar, UChar);
+HChar *s390_irgen_MSGR(UChar, UChar);
+HChar *s390_irgen_MSGFR(UChar, UChar);
+HChar *s390_irgen_MS(UChar, IRTemp);
+HChar *s390_irgen_MSY(UChar, IRTemp);
+HChar *s390_irgen_MSG(UChar, IRTemp);
+HChar *s390_irgen_MSGF(UChar, IRTemp);
+HChar *s390_irgen_MSFI(UChar, UInt);
+HChar *s390_irgen_MSGFI(UChar, UInt);
+HChar *s390_irgen_OR(UChar, UChar);
+HChar *s390_irgen_OGR(UChar, UChar);
+HChar *s390_irgen_ORK(UChar, UChar, UChar);
+HChar *s390_irgen_OGRK(UChar, UChar, UChar);
+HChar *s390_irgen_O(UChar, IRTemp);
+HChar *s390_irgen_OY(UChar, IRTemp);
+HChar *s390_irgen_OG(UChar, IRTemp);
+HChar *s390_irgen_OI(UChar, IRTemp);
+HChar *s390_irgen_OIY(UChar, IRTemp);
+HChar *s390_irgen_OC(UChar, IRTemp, IRTemp);
+HChar *s390_irgen_OIHF(UChar, UInt);
+HChar *s390_irgen_OIHH(UChar, UShort);
+HChar *s390_irgen_OIHL(UChar, UShort);
+HChar *s390_irgen_OILF(UChar, UInt);
+HChar *s390_irgen_OILH(UChar, UShort);
+HChar *s390_irgen_OILL(UChar, UShort);
+HChar *s390_irgen_PFD(void);
+HChar *s390_irgen_PFDRL(void);
+HChar *s390_irgen_RLL(UChar, UChar, IRTemp);
+HChar *s390_irgen_RLLG(UChar, UChar, IRTemp);
+HChar *s390_irgen_RNSBG(UChar, UChar, UChar, UChar, UChar);
+HChar *s390_irgen_RXSBG(UChar, UChar, UChar, UChar, UChar);
+HChar *s390_irgen_ROSBG(UChar, UChar, UChar, UChar, UChar);
+HChar *s390_irgen_RISBG(UChar, UChar, UChar, UChar, UChar);
+HChar *s390_irgen_SRST(UChar, UChar);
+HChar *s390_irgen_SAR(UChar, UChar);
+HChar *s390_irgen_SLDA(UChar, IRTemp);
+HChar *s390_irgen_SLDL(UChar, IRTemp);
+HChar *s390_irgen_SLA(UChar, IRTemp);
+HChar *s390_irgen_SLAK(UChar, UChar, IRTemp);
+HChar *s390_irgen_SLAG(UChar, UChar, IRTemp);
+HChar *s390_irgen_SLL(UChar, IRTemp);
+HChar *s390_irgen_SLLK(UChar, UChar, IRTemp);
+HChar *s390_irgen_SLLG(UChar, UChar, IRTemp);
+HChar *s390_irgen_SRDA(UChar, IRTemp);
+HChar *s390_irgen_SRDL(UChar, IRTemp);
+HChar *s390_irgen_SRA(UChar, IRTemp);
+HChar *s390_irgen_SRAK(UChar, UChar, IRTemp);
+HChar *s390_irgen_SRAG(UChar, UChar, IRTemp);
+HChar *s390_irgen_SRL(UChar, IRTemp);
+HChar *s390_irgen_SRLK(UChar, UChar, IRTemp);
+HChar *s390_irgen_SRLG(UChar, UChar, IRTemp);
+HChar *s390_irgen_ST(UChar, IRTemp);
+HChar *s390_irgen_STY(UChar, IRTemp);
+HChar *s390_irgen_STG(UChar, IRTemp);
+HChar *s390_irgen_STRL(UChar, UInt);
+HChar *s390_irgen_STGRL(UChar, UInt);
+HChar *s390_irgen_STAM(UChar, UChar, IRTemp);
+HChar *s390_irgen_STAMY(UChar, UChar, IRTemp);
+HChar *s390_irgen_STC(UChar, IRTemp);
+HChar *s390_irgen_STCY(UChar, IRTemp);
+HChar *s390_irgen_STCH(UChar, IRTemp);
+HChar *s390_irgen_STCM(UChar, UChar, IRTemp);
+HChar *s390_irgen_STCMY(UChar, UChar, IRTemp);
+HChar *s390_irgen_STCMH(UChar, UChar, IRTemp);
+HChar *s390_irgen_STH(UChar, IRTemp);
+HChar *s390_irgen_STHY(UChar, IRTemp);
+HChar *s390_irgen_STHRL(UChar, UInt);
+HChar *s390_irgen_STHH(UChar, IRTemp);
+HChar *s390_irgen_STFH(UChar, IRTemp);
+HChar *s390_irgen_STM(UChar, UChar, IRTemp);
+HChar *s390_irgen_STMY(UChar, UChar, IRTemp);
+HChar *s390_irgen_STMG(UChar, UChar, IRTemp);
+HChar *s390_irgen_STMH(UChar, UChar, IRTemp);
+HChar *s390_irgen_STPQ(UChar, IRTemp);
+HChar *s390_irgen_STRVH(UChar, IRTemp);
+HChar *s390_irgen_STRV(UChar, IRTemp);
+HChar *s390_irgen_STRVG(UChar, IRTemp);
+HChar *s390_irgen_SR(UChar, UChar);
+HChar *s390_irgen_SGR(UChar, UChar);
+HChar *s390_irgen_SGFR(UChar, UChar);
+HChar *s390_irgen_SRK(UChar, UChar, UChar);
+HChar *s390_irgen_SGRK(UChar, UChar, UChar);
+HChar *s390_irgen_S(UChar, IRTemp);
+HChar *s390_irgen_SY(UChar, IRTemp);
+HChar *s390_irgen_SG(UChar, IRTemp);
+HChar *s390_irgen_SGF(UChar, IRTemp);
+HChar *s390_irgen_SH(UChar, IRTemp);
+HChar *s390_irgen_SHY(UChar, IRTemp);
+HChar *s390_irgen_SHHHR(UChar, UChar, UChar);
+HChar *s390_irgen_SHHLR(UChar, UChar, UChar);
+HChar *s390_irgen_SLR(UChar, UChar);
+HChar *s390_irgen_SLGR(UChar, UChar);
+HChar *s390_irgen_SLGFR(UChar, UChar);
+HChar *s390_irgen_SLRK(UChar, UChar, UChar);
+HChar *s390_irgen_SLGRK(UChar, UChar, UChar);
+HChar *s390_irgen_SL(UChar, IRTemp);
+HChar *s390_irgen_SLY(UChar, IRTemp);
+HChar *s390_irgen_SLG(UChar, IRTemp);
+HChar *s390_irgen_SLGF(UChar, IRTemp);
+HChar *s390_irgen_SLFI(UChar, UInt);
+HChar *s390_irgen_SLGFI(UChar, UInt);
+HChar *s390_irgen_SLHHHR(UChar, UChar, UChar);
+HChar *s390_irgen_SLHHLR(UChar, UChar, UChar);
+HChar *s390_irgen_SLBR(UChar, UChar);
+HChar *s390_irgen_SLBGR(UChar, UChar);
+HChar *s390_irgen_SLB(UChar, IRTemp);
+HChar *s390_irgen_SLBG(UChar, IRTemp);
+HChar *s390_irgen_SVC(UChar);
+HChar *s390_irgen_TS(IRTemp);
+HChar *s390_irgen_TM(UChar, IRTemp);
+HChar *s390_irgen_TMY(UChar, IRTemp);
+HChar *s390_irgen_TMHH(UChar, UShort);
+HChar *s390_irgen_TMHL(UChar, UShort);
+HChar *s390_irgen_TMLH(UChar, UShort);
+HChar *s390_irgen_TMLL(UChar, UShort);
+HChar *s390_irgen_CPSDR(UChar, UChar, UChar);
+HChar *s390_irgen_EFPC(UChar);
+HChar *s390_irgen_LER(UChar, UChar);
+HChar *s390_irgen_LDR(UChar, UChar);
+HChar *s390_irgen_LXR(UChar, UChar);
+HChar *s390_irgen_LE(UChar, IRTemp);
+HChar *s390_irgen_LD(UChar, IRTemp);
+HChar *s390_irgen_LEY(UChar, IRTemp);
+HChar *s390_irgen_LDY(UChar, IRTemp);
+HChar *s390_irgen_LCDFR(UChar, UChar);
+HChar *s390_irgen_LFPC(IRTemp);
+HChar *s390_irgen_LDGR(UChar, UChar);
+HChar *s390_irgen_LGDR(UChar, UChar);
+HChar *s390_irgen_LNDFR(UChar, UChar);
+HChar *s390_irgen_LPDFR(UChar, UChar);
+HChar *s390_irgen_LZER(UChar);
+HChar *s390_irgen_LZDR(UChar);
+HChar *s390_irgen_LZXR(UChar);
+HChar *s390_irgen_SRNM(IRTemp);
+HChar *s390_irgen_SFPC(UChar);
+HChar *s390_irgen_STE(UChar, IRTemp);
+HChar *s390_irgen_STD(UChar, IRTemp);
+HChar *s390_irgen_STEY(UChar, IRTemp);
+HChar *s390_irgen_STDY(UChar, IRTemp);
+HChar *s390_irgen_STFPC(IRTemp);
+HChar *s390_irgen_AEBR(UChar, UChar);
+HChar *s390_irgen_ADBR(UChar, UChar);
+HChar *s390_irgen_AXBR(UChar, UChar);
+HChar *s390_irgen_AEB(UChar, IRTemp);
+HChar *s390_irgen_ADB(UChar, IRTemp);
+HChar *s390_irgen_CEBR(UChar, UChar);
+HChar *s390_irgen_CDBR(UChar, UChar);
+HChar *s390_irgen_CXBR(UChar, UChar);
+HChar *s390_irgen_CEB(UChar, IRTemp);
+HChar *s390_irgen_CDB(UChar, IRTemp);
+HChar *s390_irgen_CEFBR(UChar, UChar);
+HChar *s390_irgen_CDFBR(UChar, UChar);
+HChar *s390_irgen_CXFBR(UChar, UChar);
+HChar *s390_irgen_CEGBR(UChar, UChar);
+HChar *s390_irgen_CDGBR(UChar, UChar);
+HChar *s390_irgen_CXGBR(UChar, UChar);
+HChar *s390_irgen_CFEBR(UChar, UChar, UChar);
+HChar *s390_irgen_CFDBR(UChar, UChar, UChar);
+HChar *s390_irgen_CFXBR(UChar, UChar, UChar);
+HChar *s390_irgen_CGEBR(UChar, UChar, UChar);
+HChar *s390_irgen_CGDBR(UChar, UChar, UChar);
+HChar *s390_irgen_CGXBR(UChar, UChar, UChar);
+HChar *s390_irgen_DEBR(UChar, UChar);
+HChar *s390_irgen_DDBR(UChar, UChar);
+HChar *s390_irgen_DXBR(UChar, UChar);
+HChar *s390_irgen_DEB(UChar, IRTemp);
+HChar *s390_irgen_DDB(UChar, IRTemp);
+HChar *s390_irgen_LTEBR(UChar, UChar);
+HChar *s390_irgen_LTDBR(UChar, UChar);
+HChar *s390_irgen_LTXBR(UChar, UChar);
+HChar *s390_irgen_LCEBR(UChar, UChar);
+HChar *s390_irgen_LCDBR(UChar, UChar);
+HChar *s390_irgen_LCXBR(UChar, UChar);
+HChar *s390_irgen_LDEBR(UChar, UChar);
+HChar *s390_irgen_LXDBR(UChar, UChar);
+HChar *s390_irgen_LXEBR(UChar, UChar);
+HChar *s390_irgen_LDEB(UChar, IRTemp);
+HChar *s390_irgen_LXDB(UChar, IRTemp);
+HChar *s390_irgen_LXEB(UChar, IRTemp);
+HChar *s390_irgen_LNEBR(UChar, UChar);
+HChar *s390_irgen_LNDBR(UChar, UChar);
+HChar *s390_irgen_LNXBR(UChar, UChar);
+HChar *s390_irgen_LPEBR(UChar, UChar);
+HChar *s390_irgen_LPDBR(UChar, UChar);
+HChar *s390_irgen_LPXBR(UChar, UChar);
+HChar *s390_irgen_LEDBR(UChar, UChar);
+HChar *s390_irgen_LDXBR(UChar, UChar);
+HChar *s390_irgen_LEXBR(UChar, UChar);
+HChar *s390_irgen_MEEBR(UChar, UChar);
+HChar *s390_irgen_MDBR(UChar, UChar);
+HChar *s390_irgen_MXBR(UChar, UChar);
+HChar *s390_irgen_MEEB(UChar, IRTemp);
+HChar *s390_irgen_MDB(UChar, IRTemp);
+HChar *s390_irgen_MAEBR(UChar, UChar, UChar);
+HChar *s390_irgen_MADBR(UChar, UChar, UChar);
+HChar *s390_irgen_MAEB(UChar, IRTemp, UChar);
+HChar *s390_irgen_MADB(UChar, IRTemp, UChar);
+HChar *s390_irgen_MSEBR(UChar, UChar, UChar);
+HChar *s390_irgen_MSDBR(UChar, UChar, UChar);
+HChar *s390_irgen_MSEB(UChar, IRTemp, UChar);
+HChar *s390_irgen_MSDB(UChar, IRTemp, UChar);
+HChar *s390_irgen_SQEBR(UChar, UChar);
+HChar *s390_irgen_SQDBR(UChar, UChar);
+HChar *s390_irgen_SQXBR(UChar, UChar);
+HChar *s390_irgen_SQEB(UChar, IRTemp);
+HChar *s390_irgen_SQDB(UChar, IRTemp);
+HChar *s390_irgen_SEBR(UChar, UChar);
+HChar *s390_irgen_SDBR(UChar, UChar);
+HChar *s390_irgen_SXBR(UChar, UChar);
+HChar *s390_irgen_SEB(UChar, IRTemp);
+HChar *s390_irgen_SDB(UChar, IRTemp);
+HChar *s390_irgen_TCEB(UChar, IRTemp);
+HChar *s390_irgen_TCDB(UChar, IRTemp);
+HChar *s390_irgen_TCXB(UChar, IRTemp);
+
+/* global variables */
+extern IRSB *irsb;
+extern Bool mode64;
+extern Addr64 guest_IA_curr_instr;
+extern Addr64 guest_IA_next_instr;
+extern ULong last_execute_target;
+extern DisResult *s390_dis_res;
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_priv.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __VEX_GUEST_S390_PRIV_H */
--- valgrind/VEX/priv/guest_s390_spechelper.c
+++ valgrind/VEX/priv/guest_s390_spechelper.c
@@ -0,0 +1,634 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_spechelper.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h" /* needed for guest_generic_bb_to_IR.h */
+#include "main_util.h" /* vex_streq */
+
+#include "guest_generic_bb_to_IR.h" /* needed for guest_s390_defs.h */
+#include "guest_s390_defs.h"
+#include "guest_s390_cc.h"
+
+/* Convenience macros */
+#define unop(op,a1) IRExpr_Unop((op),(a1))
+#define binop(op,a1,a2) IRExpr_Binop((op),(a1),(a2))
+#define mkU64(v) IRExpr_Const(IRConst_U64(v))
+#define mkU32(v) IRExpr_Const(IRConst_U32(v))
+#define mkU8(v) IRExpr_Const(IRConst_U8(v))
+
+
+static inline Bool
+isC64(IRExpr *expr)
+{
+ return expr->tag == Iex_Const && expr->Iex.Const.con->tag == Ico_U64;
+}
+
+
+/* The returned expression is NULL if no specialization was found. In that
+ case the helper function will be called. Otherwise, the expression has
+ type Ity_I32 and a Boolean value. */
+IRExpr *
+guest_s390x_spechelper(HChar *function_name, IRExpr **args,
+ IRStmt **precedingStmts, Int n_precedingStmts)
+{
+ UInt i, arity = 0;
+
+ for (i = 0; args[i]; i++)
+ arity++;
+
+# if 0
+ vex_printf("spec request:\n");
+ vex_printf(" %s ", function_name);
+ for (i = 0; i < arity; i++) {
+ vex_printf(" ");
+ ppIRExpr(args[i]);
+ }
+ vex_printf("\n");
+# endif
+
+ /* --------- Specialising "s390_calculate_cond" --------- */
+
+ if (vex_streq(function_name, "s390_calculate_cond")) {
+ IRExpr *cond_expr, *cc_op_expr, *cc_dep1, *cc_dep2;
+ ULong cond, cc_op;
+
+ vassert(arity == 5);
+
+ cond_expr = args[0];
+ cc_op_expr = args[1];
+
+ /* The necessary requirement for all optimizations here is that the
+ condition and the cc_op are constant. So check that upfront. */
+ if (! isC64(cond_expr)) return NULL;
+ if (! isC64(cc_op_expr)) return NULL;
+
+ cond = cond_expr->Iex.Const.con->Ico.U64;
+ cc_op = cc_op_expr->Iex.Const.con->Ico.U64;
+
+ vassert(cond <= 15);
+
+ /*
+ +------+---+---+---+---+
+ | cc | 0 | 1 | 2 | 3 |
+ | cond | 8 | 4 | 2 | 1 |
+ +------+---+---+---+---+
+ */
+ cc_dep1 = args[2];
+ cc_dep2 = args[3];
+
+ /* S390_CC_OP_SIGNED_COMPARE */
+ if (cc_op == S390_CC_OP_SIGNED_COMPARE) {
+ /*
+ cc == 0 --> cc_dep1 == cc_dep2 (cond == 8)
+ cc == 1 --> cc_dep1 < cc_dep2 (cond == 4)
+ cc == 2 --> cc_dep1 > cc_dep2 (cond == 2)
+
+ Because cc == 3 cannot occur the rightmost bit of cond is
+ a don't care.
+ */
+ if (cond == 8 || cond == 8 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
+ }
+ if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, cc_dep2));
+ }
+ if (cond == 4 || cond == 4 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64S, cc_dep1, cc_dep2));
+ }
+ if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64S, cc_dep1, cc_dep2));
+ }
+ /* cc_dep1 > cc_dep2 ----> cc_dep2 < cc_dep1 */
+ if (cond == 2 || cond == 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64S, cc_dep2, cc_dep1));
+ }
+ if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64S, cc_dep2, cc_dep1));
+ }
+ if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+ return mkU32(1);
+ }
+ /* Remaining case */
+ return mkU32(0);
+ }
+
+ /* S390_CC_OP_UNSIGNED_COMPARE */
+ if (cc_op == S390_CC_OP_UNSIGNED_COMPARE) {
+ /*
+ cc == 0 --> cc_dep1 == cc_dep2 (cond == 8)
+ cc == 1 --> cc_dep1 < cc_dep2 (cond == 4)
+ cc == 2 --> cc_dep1 > cc_dep2 (cond == 2)
+
+ Because cc == 3 cannot occur the rightmost bit of cond is
+ a don't care.
+ */
+ if (cond == 8 || cond == 8 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
+ }
+ if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, cc_dep2));
+ }
+ if (cond == 4 || cond == 4 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
+ }
+ if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
+ }
+ /* cc_dep1 > cc_dep2 ----> cc_dep2 < cc_dep1 */
+ if (cond == 2 || cond == 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep2, cc_dep1));
+ }
+ if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
+ }
+ if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+ return mkU32(1);
+ }
+ /* Remaining case */
+ return mkU32(0);
+ }
+
+ /* S390_CC_OP_LOAD_AND_TEST */
+ if (cc_op == S390_CC_OP_LOAD_AND_TEST) {
+ /*
+ cc == 0 --> cc_dep1 == 0 (cond == 8)
+ cc == 1 --> cc_dep1 < 0 (cond == 4)
+ cc == 2 --> cc_dep1 > 0 (cond == 2)
+
+ Because cc == 3 cannot occur the rightmost bit of cond is
+ a don't care.
+ */
+ if (cond == 8 || cond == 8 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+ }
+ if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+ }
+ if (cond == 4 || cond == 4 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64S, cc_dep1, mkU64(0)));
+ }
+ if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64S, cc_dep1, mkU64(0)));
+ }
+ /* cc_dep1 > 0 ----> 0 < cc_dep1 */
+ if (cond == 2 || cond == 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64S, mkU64(0), cc_dep1));
+ }
+ if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64S, mkU64(0), cc_dep1));
+ }
+ if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+ return mkU32(1);
+ }
+ /* Remaining case */
+ return mkU32(0);
+ }
+
+ /* S390_CC_OP_BITWISE */
+ if (cc_op == S390_CC_OP_BITWISE) {
+ /*
+ cc_dep1 is the result of the boolean operation.
+
+ cc == 0 --> cc_dep1 == 0 (cond == 8)
+ cc == 1 --> cc_dep1 != 0 (cond == 4)
+
+ Because cc == 2 and cc == 3 cannot occur the two rightmost bits of
+ cond are don't cares. Therefore:
+
+ cond == 00xx -> always false
+ cond == 01xx -> not equal
+ cond == 10xx -> equal
+ cond == 11xx -> always true
+ */
+ if ((cond & (8 + 4)) == 8 + 4) {
+ return mkU32(1);
+ }
+ if (cond & 8) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+ }
+ if (cond & 4) {
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+ }
+ /* Remaining case */
+ return mkU32(0);
+ }
+
+ /* S390_CC_OP_INSERT_CHAR_MASK_32
+ Since the mask comes from an immediate field in the opcode, we
+ expect the mask to be a constant here. That simplifies matters. */
+ if (cc_op == S390_CC_OP_INSERT_CHAR_MASK_32) {
+ ULong mask;
+ UInt imask = 0, shift = 0;
+ IRExpr *word;
+
+ if (! isC64(cc_dep2)) goto missed;
+
+ mask = cc_dep2->Iex.Const.con->Ico.U64;
+
+ /* Extract the 32-bit value from the thunk */
+
+ word = unop(Iop_64to32, cc_dep1);
+
+ switch (mask) {
+ case 0: shift = 0; imask = 0x00000000; break;
+ case 1: shift = 24; imask = 0x000000FF; break;
+ case 2: shift = 16; imask = 0x0000FF00; break;
+ case 3: shift = 16; imask = 0x0000FFFF; break;
+ case 4: shift = 8; imask = 0x00FF0000; break;
+ case 5: shift = 8; imask = 0x00FF00FF; break;
+ case 6: shift = 8; imask = 0x00FFFF00; break;
+ case 7: shift = 8; imask = 0x00FFFFFF; break;
+ case 8: shift = 0; imask = 0xFF000000; break;
+ case 9: shift = 0; imask = 0xFF0000FF; break;
+ case 10: shift = 0; imask = 0xFF00FF00; break;
+ case 11: shift = 0; imask = 0xFF00FFFF; break;
+ case 12: shift = 0; imask = 0xFFFF0000; break;
+ case 13: shift = 0; imask = 0xFFFF00FF; break;
+ case 14: shift = 0; imask = 0xFFFFFF00; break;
+ case 15: shift = 0; imask = 0xFFFFFFFF; break;
+ }
+
+ /* Select the bits that were inserted */
+ word = binop(Iop_And32, word, mkU32(imask));
+
+ /* cc == 0 --> all inserted bits zero or mask == 0 (cond == 8)
+ cc == 1 --> leftmost inserted bit is one (cond == 4)
+ cc == 2 --> leftmost inserted bit is zero and not (cond == 2)
+ all inserted bits are zero
+
+      Because only cc == 0,1,2 can occur, the rightmost bit of cond is a don't care */
+ if (cond == 8 || cond == 8 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ32, word, mkU32(0)));
+ }
+ if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpNE32, word, mkU32(0)));
+ }
+
+ /* Sign extend */
+ if (shift != 0) {
+ word = binop(Iop_Sar32, binop(Iop_Shl32, word, mkU8(shift)),
+ mkU8(shift));
+ }
+
+ if (cond == 4 || cond == 4 + 1) { /* word < 0 */
+ return unop(Iop_1Uto32, binop(Iop_CmpLT32S, word, mkU32(0)));
+ }
+ if (cond == 2 || cond == 2 + 1) { /* word > 0 */
+ return unop(Iop_1Uto32, binop(Iop_CmpLT32S, mkU32(0), word));
+ }
+ if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE32S, word, mkU32(0)));
+ }
+ if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpLE32S, mkU32(0), word));
+ }
+ if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+ return mkU32(1);
+ }
+ /* Remaining case */
+ return mkU32(0);
+ }
+
+ /* S390_CC_OP_TEST_UNDER_MASK_8
+ Since the mask comes from an immediate field in the opcode, we
+ expect the mask to be a constant here. That simplifies matters. */
+ if (cc_op == S390_CC_OP_TEST_UNDER_MASK_8) {
+ ULong mask16;
+
+ if (! isC64(cc_dep2)) goto missed;
+
+ mask16 = cc_dep2->Iex.Const.con->Ico.U64;
+
+ /* Get rid of the mask16 == 0 case first. Some of the simplifications
+         below (e.g. for OVFL) only hold if mask16 != 0. */
+ if (mask16 == 0) { /* cc == 0 */
+ if (cond & 0x8) return mkU32(1);
+ return mkU32(0);
+ }
+
+ /* cc == 2 is a don't care */
+ if (cond == 8 || cond == 8 + 2) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 7 || cond == 7 - 2) {
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 1 || cond == 1 + 2) {
+/* fixs390: I'm not exactly sure whether it is allowed that cc_dep2 is referred
+ to twice in the expression we build up here. Elsewhere we try to avoid
+ that (see the bazillions mkU64(0) in irgen.c). On the other hand...
+ guest_x86_helper.c around line 1144 does this, too. */
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ cc_dep2));
+ }
+ if (cond == 14 || cond == 14 - 2) { /* ! OVFL */
+ /* fixs390: see above; multiple reference to cc_dep2 */
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ cc_dep2));
+ }
+ goto missed;
+ }
+
+ /* S390_CC_OP_TEST_UNDER_MASK_16
+ Since the mask comes from an immediate field in the opcode, we
+ expect the mask to be a constant here. That simplifies matters. */
+ if (cc_op == S390_CC_OP_TEST_UNDER_MASK_16) {
+ ULong mask16;
+ UInt msb;
+
+ if (! isC64(cc_dep2)) goto missed;
+
+ mask16 = cc_dep2->Iex.Const.con->Ico.U64;
+
+ /* Get rid of the mask16 == 0 case first. Some of the simplifications
+         below (e.g. for OVFL) only hold if mask16 != 0. */
+ if (mask16 == 0) { /* cc == 0 */
+ if (cond & 0x8) return mkU32(1);
+ return mkU32(0);
+ }
+
+ if (cond == 8) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 7) {
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 1) {
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(mask16)));
+ }
+ if (cond == 14) { /* ! OVFL */
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(mask16)));
+ }
+
+ /* Find MSB in mask */
+ msb = 0x8000;
+ while (msb > mask16)
+ msb >>= 1;
+
+ if (cond == 2) { /* cc == 2 */
+ IRExpr *c1, *c2;
+
+ /* fixs390: see above; multiple reference to cc_dep1 */
+
+ /* (cc_dep & msb) != 0 && (cc_dep & mask16) != mask16 */
+ c1 = binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, mkU64(msb)), mkU64(0));
+ c2 = binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(mask16));
+ return binop(Iop_And32, unop(Iop_1Uto32, c1),
+ unop(Iop_1Uto32, c2));
+ }
+
+ if (cond == 4) { /* cc == 1 */
+ IRExpr *c1, *c2;
+
+ /* fixs390: see above; multiple reference to cc_dep1 */
+
+ /* (cc_dep & msb) == 0 && (cc_dep & mask16) != 0 */
+ c1 = binop(Iop_CmpEQ64,
+ binop(Iop_And64, cc_dep1, mkU64(msb)), mkU64(0));
+ c2 = binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(0));
+ return binop(Iop_And32, unop(Iop_1Uto32, c1),
+ unop(Iop_1Uto32, c2));
+ }
+
+ if (cond == 11) { /* cc == 0,2,3 */
+ IRExpr *c1, *c2;
+
+ /* fixs390: see above; multiple reference to cc_dep1 */
+
+ c1 = binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, mkU64(msb)), mkU64(0));
+ c2 = binop(Iop_CmpEQ64,
+ binop(Iop_And64, cc_dep1, cc_dep2),
+ mkU64(0));
+ return binop(Iop_Or32, unop(Iop_1Uto32, c1),
+ unop(Iop_1Uto32, c2));
+ }
+
+ if (cond == 3) { /* cc == 2 || cc == 3 */
+ return unop(Iop_1Uto32,
+ binop(Iop_CmpNE64,
+ binop(Iop_And64, cc_dep1, mkU64(msb)),
+ mkU64(0)));
+ }
+ if (cond == 12) { /* cc == 0 || cc == 1 */
+ return unop(Iop_1Uto32,
+ binop(Iop_CmpEQ64,
+ binop(Iop_And64, cc_dep1, mkU64(msb)),
+ mkU64(0)));
+ }
+ // vex_printf("TUM mask = 0x%llx\n", mask16);
+ goto missed;
+ }
+
+ /* S390_CC_OP_UNSIGNED_SUB_64/32 */
+ if (cc_op == S390_CC_OP_UNSIGNED_SUB_64 ||
+ cc_op == S390_CC_OP_UNSIGNED_SUB_32) {
+ /*
+ cc_dep1, cc_dep2 are the zero extended left and right operands
+
+ cc == 1 --> result != 0, borrow (cond == 4)
+ cc == 2 --> result == 0, no borrow (cond == 2)
+ cc == 3 --> result != 0, no borrow (cond == 1)
+
+ cc = (cc_dep1 == cc_dep2) ? 2
+ : (cc_dep1 > cc_dep2) ? 3 : 1;
+
+ Because cc == 0 cannot occur the leftmost bit of cond is
+ a don't care.
+ */
+ if (cond == 1 || cond == 1 + 8) { /* cc == 3 op2 < op1 */
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep2, cc_dep1));
+ }
+ if (cond == 2 || cond == 2 + 8) { /* cc == 2 */
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
+ }
+ if (cond == 4 || cond == 4 + 8) { /* cc == 1 */
+ return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
+ }
+ if (cond == 3 || cond == 3 + 8) { /* cc == 2 || cc == 3 */
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
+ }
+ if (cond == 6 || cond == 6 + 8) { /* cc == 2 || cc == 1 */
+ return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
+ }
+
+ if (cond == 5 || cond == 5 + 8) { /* cc == 3 || cc == 1 */
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, cc_dep2));
+ }
+ if (cond == 7 || cond == 7 + 8) {
+ return mkU32(1);
+ }
+ /* Remaining case */
+ return mkU32(0);
+ }
+
+ /* S390_CC_OP_UNSIGNED_ADD_64 */
+ if (cc_op == S390_CC_OP_UNSIGNED_ADD_64) {
+ /*
+ cc_dep1, cc_dep2 are the zero extended left and right operands
+
+ cc == 0 --> result == 0, no carry (cond == 8)
+ cc == 1 --> result != 0, no carry (cond == 4)
+ cc == 2 --> result == 0, carry (cond == 2)
+ cc == 3 --> result != 0, carry (cond == 1)
+ */
+ if (cond == 8) { /* cc == 0 */
+ /* Both inputs are 0 */
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+ binop(Iop_Or64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 7) { /* cc == 1,2,3 */
+ /* Not both inputs are 0 */
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+ binop(Iop_Or64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 8 + 2) { /* cc == 0,2 -> result is zero */
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+ binop(Iop_Add64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 4 + 1) { /* cc == 1,3 -> result is not zero */
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+ binop(Iop_Add64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ goto missed;
+ }
+
+ /* S390_CC_OP_UNSIGNED_ADD_32 */
+ if (cc_op == S390_CC_OP_UNSIGNED_ADD_32) {
+ /*
+ cc_dep1, cc_dep2 are the zero extended left and right operands
+
+ cc == 0 --> result == 0, no carry (cond == 8)
+ cc == 1 --> result != 0, no carry (cond == 4)
+ cc == 2 --> result == 0, carry (cond == 2)
+ cc == 3 --> result != 0, carry (cond == 1)
+ */
+ if (cond == 8) { /* cc == 0 */
+ /* Both inputs are 0 */
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+ binop(Iop_Or64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 7) { /* cc == 1,2,3 */
+ /* Not both inputs are 0 */
+ return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+ binop(Iop_Or64, cc_dep1, cc_dep2),
+ mkU64(0)));
+ }
+ if (cond == 8 + 2) { /* cc == 0,2 -> result is zero */
+ return unop(Iop_1Uto32, binop(Iop_CmpEQ32,
+ binop(Iop_Add32,
+ unop(Iop_64to32, cc_dep1),
+ unop(Iop_64to32, cc_dep2)),
+ mkU32(0)));
+ }
+ if (cond == 4 + 1) { /* cc == 1,3 -> result is not zero */
+ return unop(Iop_1Uto32, binop(Iop_CmpNE32,
+ binop(Iop_Add32,
+ unop(Iop_64to32, cc_dep1),
+ unop(Iop_64to32, cc_dep2)),
+ mkU32(0)));
+ }
+ goto missed;
+ }
+
+ /* S390_CC_OP_SET */
+ if (cc_op == S390_CC_OP_SET) {
+ /* cc_dep1 is the condition code
+
+ Return 1, if ((cond << cc_dep1) & 0x8) != 0 */
+
+ return unop(Iop_1Uto32,
+ binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ binop(Iop_Shl64, cond_expr,
+ unop(Iop_64to8, cc_dep1)),
+ mkU64(8)),
+ mkU64(0)));
+ }
+
+ /* S390_CC_OP_TEST_AND_SET */
+ if (cc_op == S390_CC_OP_TEST_AND_SET) {
+ /* cc_dep1 is the zero-extended loaded value
+
+ cc == 0 --> leftmost bit is zero (cond == 8)
+ cc == 1 --> leftmost bit is one (cond == 4)
+
+ As cc is either 0 or 1, only the two leftmost bits of the mask
+ are relevant. */
+ IRExpr *bit = binop(Iop_Shr64, cc_dep1, mkU8(7));
+
+ switch (cond & (8 + 4)) {
+ case 0: return mkU32(0);
+ case 4: return unop(Iop_1Uto32, binop(Iop_CmpNE64, bit, mkU64(0)));
+ case 8: return unop(Iop_1Uto32, binop(Iop_CmpEQ64, bit, mkU64(0)));
+ case 8 + 4: return mkU32(1);
+ }
+ /* not reached */
+ }
+
+missed:
+ // vex_printf("FLORIAN condition = %d\top = %d\n", (int)cond, (int)cc_op);
+ ;
+ }
+
+ return NULL;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_spechelper.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/guest_s390_toIR.c
+++ valgrind/VEX/priv/guest_s390_toIR.c
@@ -0,0 +1,203 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin guest_s390_toIR.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+/* "Special" instructions.
+
+ This instruction decoder can decode three special instructions
+ which mean nothing natively (are no-ops as far as regs/mem are
+ concerned) but have meaning for supporting Valgrind.
+ fixs390 later: describe special insns
+*/
+
+/* Translates s390 code to IR. */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex_guest_s390x.h" /* VexGuestS390XState */
+#include "libvex.h" /* needed for bb_to_IR.h */
+
+#include "main_util.h" /* vassert */
+#include "guest_generic_bb_to_IR.h" /* DisResult */
+#include "guest_s390_priv.h" /* s390_decode_and_irgen */
+#include "guest_s390_defs.h" /* prototypes for this file's functions */
+
+
+/*------------------------------------------------------------*/
+/*--- Globals ---*/
+/*------------------------------------------------------------*/
+
+/* fixs390: all our global variables should be in s390 namespace */
+
+/* The IRSB* into which we're generating code. */
+IRSB *irsb;
+
+/* The guest address for the instruction currently being
+ translated. */
+Addr64 guest_IA_curr_instr;
+
+/* The guest address for the instruction following the current instruction. */
+Addr64 guest_IA_next_instr;
+
+/* s390 has a trampoline for the EX instruction */
+Addr64 guest_trampoline;
+
+/* The last seen execute target instruction */
+ULong last_execute_target;
+
+
+/* Result of disassembly step. */
+DisResult *s390_dis_res;
+
+
+/* Generate an IRExpr for an address. */
+static __inline__ IRExpr *
+mkaddr_expr(Addr64 addr)
+{
+ return IRExpr_Const(IRConst_U64(addr));
+}
+
+
+/* Disassemble a single instruction INSN into IR. */
+static DisResult
+disInstr_S390_WRK(UChar *insn, Bool (*resteerOkFn)(void *, Addr64),
+ void *callback_data)
+{
+ UChar byte;
+ UInt insn_length;
+ DisResult dres;
+
+ /* ---------------------------------------------------- */
+ /* --- Compute instruction length -- */
+ /* ---------------------------------------------------- */
+
+ /* Get the first byte of the insn. */
+ byte = insn[0];
+
+ /* The leftmost two bits (0:1) encode the length of the insn in bytes.
+ 00 -> 2 bytes, 01 -> 4 bytes, 10 -> 4 bytes, 11 -> 6 bytes. */
+ insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
+
+ guest_IA_next_instr = guest_IA_curr_instr + insn_length;
+
+ /* ---------------------------------------------------- */
+ /* --- Initialise the DisResult data -- */
+ /* ---------------------------------------------------- */
+ dres.whatNext = Dis_Continue;
+ dres.len = insn_length;
+ dres.continueAt = 0;
+
+ /* fixs390: special insn for test purposes only. */
+ /* All other special insns are handled in s390_decode_and_irgen() */
+ {
+ if (byte == 0x0) {
+ /* There is no insn whose first byte is all zero. There never will be.
+ So we use that for testing purposes when we hand-feed a basic block
+ to VEX. We terminate such a basic block with 0x0000 which will then
+ cause the translation to stop. */
+ dres.whatNext = Dis_StopHere;
+ dres.len = 2;
+ irsb->next = mkaddr_expr(0x0);
+ return dres;
+ }
+ }
+
+ /* fixs390: we should probably pass the resteer-function and the callback
+ data. It's not needed for correctness but improves performance. */
+
+ /* Normal and special instruction handling starts here. */
+ if (s390_decode_and_irgen(insn, insn_length, &dres) == 0) {
+ /* All decode failures end up here. The decoder has already issued an
+ error message.
+ Tell the dispatcher that this insn cannot be decoded, and so has
+ not been executed, and (is currently) the next to be executed.
+ IA should be up-to-date since it made so at the start of each
+ insn, but nevertheless be paranoid and update it again right
+ now. */
+ addStmtToIRSB(irsb, IRStmt_Put(S390_GUEST_OFFSET(guest_IA),
+ mkaddr_expr(guest_IA_curr_instr)));
+
+ irsb->next = mkaddr_expr(guest_IA_curr_instr);
+ irsb->jumpkind = Ijk_NoDecode;
+ dres.whatNext = Dis_StopHere;
+ dres.len = 0;
+
+ return dres;
+ }
+
+ return dres;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR. The instruction
+ is located in host memory at &guest_code[delta]. */
+
+DisResult
+disInstr_S390(IRSB *irsb_IN,
+ Bool put_IP,
+ Bool (*resteerOkFn)(void *, Addr64),
+ Bool resteerCisOk,
+ void *callback_opaque,
+ UChar *guest_code,
+ Long delta,
+ Addr64 guest_IP,
+ VexArch guest_arch,
+ VexArchInfo *archinfo,
+ VexAbiInfo *abiinfo,
+ Bool host_bigendian)
+{
+ vassert(guest_arch == VexArchS390X);
+
+ /* The instruction decoder requires a big-endian machine. */
+ vassert(host_bigendian == True);
+
+ /* Set globals (see top of this file) */
+ guest_IA_curr_instr = guest_IP;
+
+ irsb = irsb_IN;
+
+ vassert(guest_arch == VexArchS390X);
+
+ /* We may be asked to update the guest IA before going further. */
+ if (put_IP)
+ addStmtToIRSB(irsb, IRStmt_Put(S390_GUEST_OFFSET(guest_IA),
+ mkaddr_expr(guest_IA_curr_instr)));
+
+ return disInstr_S390_WRK(guest_code + delta, resteerOkFn, callback_opaque);
+}
+
+/*---------------------------------------------------------------*/
+/*--- end guest_s390_toIR.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_amode.c
+++ valgrind/VEX/priv/host_s390_amode.c
@@ -0,0 +1,240 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_amode.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+
+#include "main_util.h" /* vassert */
+#include "host_generic_regs.h"
+#include "host_s390_hreg.h" /* s390_hreg_print */
+#include "host_s390_amode.h"
+
+
+/* Is VALUE within the domain of a 20-bit signed integer. */
+static __inline__ Bool
+fits_signed_20bit(Int value)
+{
+ return ((value << 12) >> 12) == value;
+}
+
+
+/* Is VALUE within the domain of a 12-bit unsigned integer. */
+static __inline__ Bool
+fits_unsigned_12bit(Int value)
+{
+ return (value & 0xFFF) == value;
+}
+
+
+/* Construct a b12 amode. */
+s390_amode *
+s390_amode_b12(Int d, HReg b)
+{
+ s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+
+ vassert(fits_unsigned_12bit(d));
+
+ am->tag = S390_AMODE_B12;
+ am->d = d;
+ am->b = b;
+ am->x = 0; /* hregNumber(0) == 0 */
+
+ return am;
+}
+
+
+/* Construct a b20 amode. */
+s390_amode *
+s390_amode_b20(Int d, HReg b)
+{
+ s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+
+ vassert(fits_signed_20bit(d));
+
+ am->tag = S390_AMODE_B20;
+ am->d = d;
+ am->b = b;
+ am->x = 0; /* hregNumber(0) == 0 */
+
+ return am;
+}
+
+
+/* Construct a bx12 amode. */
+s390_amode *
+s390_amode_bx12(Int d, HReg b, HReg x)
+{
+ s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+
+ vassert(fits_unsigned_12bit(d));
+ vassert(b != 0);
+ vassert(x != 0);
+
+ am->tag = S390_AMODE_BX12;
+ am->d = d;
+ am->b = b;
+ am->x = x;
+
+ return am;
+}
+
+
+/* Construct a bx20 amode. */
+s390_amode *
+s390_amode_bx20(Int d, HReg b, HReg x)
+{
+ s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+
+ vassert(fits_signed_20bit(d));
+ vassert(b != 0);
+ vassert(x != 0);
+
+ am->tag = S390_AMODE_BX20;
+ am->d = d;
+ am->b = b;
+ am->x = x;
+
+ return am;
+}
+
+
+/* Decompile the given amode into a static buffer and return it. */
+const HChar *
+s390_amode_as_string(const s390_amode *am)
+{
+ static HChar buf[30];
+ HChar *p;
+
+ buf[0] = '\0';
+ p = buf;
+
+ switch (am->tag) {
+ case S390_AMODE_B12:
+ case S390_AMODE_B20:
+ vex_sprintf(p, "%d(%s)", am->d, s390_hreg_as_string(am->b));
+ break;
+
+ case S390_AMODE_BX12:
+ case S390_AMODE_BX20:
+ /* s390_hreg_as_string returns pointer to local buffer. Need to
+ split this into two printfs */
+ p += vex_sprintf(p, "%d(%s,", am->d, s390_hreg_as_string(am->x));
+ vex_sprintf(p, "%s)", s390_hreg_as_string(am->b));
+ break;
+
+ default:
+ vpanic("s390_amode_as_string");
+ }
+
+ return buf;
+}
+
+
+/* Helper function for s390_amode_is_sane */
+static __inline__ Bool
+is_virtual_gpr(HReg reg)
+{
+ return hregIsVirtual(reg) && hregClass(reg) == HRcInt64;
+}
+
+
+/* Sanity check for an amode */
+Bool
+s390_amode_is_sane(const s390_amode *am)
+{
+ switch (am->tag) {
+ case S390_AMODE_B12:
+ return is_virtual_gpr(am->b) && fits_unsigned_12bit(am->d);
+
+ case S390_AMODE_B20:
+ return is_virtual_gpr(am->b) && fits_signed_20bit(am->d);
+
+ case S390_AMODE_BX12:
+ return is_virtual_gpr(am->b) && is_virtual_gpr(am->x) &&
+ fits_unsigned_12bit(am->d);
+
+ case S390_AMODE_BX20:
+ return is_virtual_gpr(am->b) && is_virtual_gpr(am->x) &&
+ fits_signed_20bit(am->d);
+
+ default:
+ vpanic("s390_amode_is_sane");
+ }
+}
+
+
+/* Record the register use of an amode */
+void
+s390_amode_get_reg_usage(HRegUsage *u, const s390_amode *am)
+{
+ switch (am->tag) {
+ case S390_AMODE_B12:
+ case S390_AMODE_B20:
+ addHRegUse(u, HRmRead, am->b);
+ return;
+
+ case S390_AMODE_BX12:
+ case S390_AMODE_BX20:
+ addHRegUse(u, HRmRead, am->b);
+ addHRegUse(u, HRmRead, am->x);
+ return;
+
+ default:
+ vpanic("s390_amode_get_reg_usage");
+ }
+}
+
+
+void
+s390_amode_map_regs(HRegRemap *m, s390_amode *am)
+{
+ switch (am->tag) {
+ case S390_AMODE_B12:
+ case S390_AMODE_B20:
+ am->b = lookupHRegRemap(m, am->b);
+ return;
+
+ case S390_AMODE_BX12:
+ case S390_AMODE_BX20:
+ am->b = lookupHRegRemap(m, am->b);
+ am->x = lookupHRegRemap(m, am->x);
+ return;
+
+ default:
+ vpanic("s390_amode_map_regs");
+ }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_amode.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_amode.h
+++ valgrind/VEX/priv/host_s390_amode.h
@@ -0,0 +1,80 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_amode.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#ifndef __VEX_HOST_S390_AMODE_H
+#define __VEX_HOST_S390_AMODE_H
+
+#include "libvex_basictypes.h" /* Int etc */
+#include "main_util.h" /* needed for host_generic_regs.h */
+#include "host_generic_regs.h" /* HReg */
+
+
+/* --------- Memory address expressions (amodes). --------- */
+
+/* These are the address modes:
+ (1) b12: base register + 12-bit unsigned offset (e.g. RS)
+ (2) b20: base register + 20-bit signed offset (e.g. RSY)
+ (3) bx12: base register + index register + 12-bit unsigned offset (e.g. RX)
+ (4) bx20: base register + index register + 20-bit signed offset (e.g. RXY)
+ fixs390: There is also pc-relative stuff.. e.g. LARL
+*/
+
+typedef enum {
+ S390_AMODE_B12,
+ S390_AMODE_B20,
+ S390_AMODE_BX12,
+ S390_AMODE_BX20
+} s390_amode_t;
+
+typedef struct s390_amode {
+ s390_amode_t tag;
+ HReg b;
+ HReg x; /* hregNumber(x) == 0 for S390_AMODE_B12/B20 kinds */
+ Int d; /* 12 bit unsigned or 20 bit signed */
+} s390_amode;
+
+
+s390_amode *s390_amode_b12(Int d, HReg b);
+s390_amode *s390_amode_b20(Int d, HReg b);
+s390_amode *s390_amode_bx12(Int d, HReg b, HReg x);
+s390_amode *s390_amode_bx20(Int d, HReg b, HReg x);
+Bool s390_amode_is_sane(const s390_amode *);
+void s390_amode_get_reg_usage(HRegUsage *, const s390_amode *);
+void s390_amode_map_regs(HRegRemap *, s390_amode *);
+
+const HChar *s390_amode_as_string(const s390_amode *);
+
+#endif /* ndef __VEX_HOST_S390_AMODE_H */
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_amode.h ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_defs.c
+++ valgrind/VEX/priv/host_s390_defs.c
@@ -0,0 +1,294 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "host_s390_insn.h"
+#include "host_s390_amode.h"
+#include "host_s390_hreg.h"
+#include "host_s390_isel.h"
+#include "host_s390_defs.h"
+
+
+/* KLUDGE: We need to know the hwcaps of the host when generating
+ code. But that info is not passed to emit_S390Instr. Only mode64 is
+ being passed. So, ideally, we want this passed as an argument, too.
+ Until then, we use a global variable. This variable is set as a side
+ effect of iselSB_S390. This is safe because instructions are selected
+ before they are emitted. */
+const VexArchInfo *s390_archinfo_host;
+
+
+void
+ppS390AMode(struct s390_amode *am)
+{
+ vex_printf("%s", s390_amode_as_string(am));
+}
+
+void
+ppS390Instr(struct s390_insn *insn, Bool mode64)
+{
+ vex_printf("%s", s390_insn_as_string(insn));
+}
+
+void
+ppHRegS390(HReg reg)
+{
+ vex_printf("%s", s390_hreg_as_string(reg));
+}
+
+
+/* --------- Helpers for register allocation. --------- */
+
+/* Called once per translation. */
+void
+getAllocableRegs_S390(Int *nregs, HReg **arr, Bool mode64)
+{
+ s390_hreg_get_allocable(nregs, arr);
+}
+
+
+/* Tell the register allocator how the given instruction uses the registers
+ it refers to. */
+void
+getRegUsage_S390Instr(HRegUsage *u, struct s390_insn *insn, Bool mode64)
+{
+ s390_insn_get_reg_usage(u, insn);
+}
+
+
+/* Map the registers of the given instruction */
+void
+mapRegs_S390Instr(HRegRemap *m, struct s390_insn *insn, Bool mode64)
+{
+ s390_insn_map_regs(m, insn);
+}
+
+
+/* Figure out if the given insn represents a reg-reg move, and if so
+ assign the source and destination to *src and *dst. If in doubt say No.
+ Used by the register allocator to do move coalescing. */
+Bool
+isMove_S390Instr(struct s390_insn *insn, HReg *src, HReg *dst)
+{
+ return s390_insn_is_reg_reg_move(insn, src, dst);
+}
+
+
+/* Generate s390 spill/reload instructions under the direction of the
+ register allocator. Note it's critical these don't write the
+ condition codes. This is like an Ist_Put */
+void
+genSpill_S390(HInstr **i1, HInstr **i2, HReg rreg, Int offsetB, Bool mode64)
+{
+ s390_amode *am;
+
+ vassert(offsetB >= 0);
+ vassert(offsetB <= (1 << 12)); /* because we use b12 amode */
+ vassert(!hregIsVirtual(rreg));
+
+ *i1 = *i2 = NULL;
+
+ am = s390_amode_b12(offsetB, s390_hreg_guest_state_pointer());
+
+ switch (hregClass(rreg)) {
+ case HRcInt64:
+ case HRcFlt64:
+ *i1 = s390_insn_store(8, am, rreg);
+ return;
+
+ default:
+ ppHRegClass(hregClass(rreg));
+ vpanic("genSpill_S390: unimplemented regclass");
+ }
+}
+
+
+/* This is like an Iex_Get */
+void
+genReload_S390(HInstr **i1, HInstr **i2, HReg rreg, Int offsetB, Bool mode64)
+{
+ s390_amode *am;
+
+ vassert(offsetB >= 0);
+ vassert(offsetB <= (1 << 12)); /* because we use b12 amode */
+ vassert(!hregIsVirtual(rreg));
+
+ *i1 = *i2 = NULL;
+
+ am = s390_amode_b12(offsetB, s390_hreg_guest_state_pointer());
+
+ switch (hregClass(rreg)) {
+ case HRcInt64:
+ case HRcFlt64:
+ *i1 = s390_insn_load(8, rreg, am);
+ return;
+
+ default:
+ ppHRegClass(hregClass(rreg));
+ vpanic("genReload_S390: unimplemented regclass");
+ }
+}
+
+#if 0
+static s390_insn *
+s390_insn_copy(const s390_insn *insn)
+{
+ s390_insn *new = LibVEX_Alloc(sizeof(s390_insn));
+
+ *new = *insn;
+
+ return new;
+}
+#endif
+
+
+/* The given instruction reads the specified vreg exactly once, and
+ that vreg is currently located at the given spill offset. If
+ possible, return a variant of the instruction to one which instead
+ references the spill slot directly. Otherwise, return NULL. */
+struct s390_insn *
+directReload_S390(struct s390_insn *insn, HReg vreg, Short spill_off)
+{
+#if 0 // needs more work
+ s390_insn *new;
+
+ /* Need to be able to use b12 addressing mode */
+ if (spill_off < 0 || spill_off > 0xFFFF0) return NULL;
+
+ if (insn->tag == S390_INSN_MOVE) {
+ if (insn->variant.move.src == vreg) {
+ vassert(insn->variant.move.dst != vreg);
+
+ spill_off += 8 - insn->size;
+
+ new = s390_insn_copy(insn);
+ new->tag = S390_INSN_LOAD;
+ new->variant.load.src =
+ s390_amode_b12(spill_off, s390_hreg_guest_state_pointer());
+
+ return new;
+ }
+ }
+
+ if (insn->tag == S390_INSN_TEST) {
+ if (insn->variant.test.src.tag == S390_OPND_REG &&
+ insn->variant.test.src.variant.reg == vreg) {
+
+ spill_off += 8 - insn->size;
+
+ new = s390_insn_copy(insn);
+ new->variant.test.src.tag = S390_OPND_AMODE;
+ new->variant.test.src.variant.am =
+ s390_amode_b12(spill_off, s390_hreg_guest_state_pointer());
+
+ return new;
+ }
+ }
+
+ if (insn->tag == S390_INSN_COMPARE) {
+ if (insn->variant.compare.src2.tag == S390_OPND_REG &&
+ insn->variant.compare.src2.variant.reg == vreg) {
+
+ vassert(insn->variant.compare.src1 != vreg);
+
+ spill_off += 8 - insn->size;
+
+ new = s390_insn_copy(insn);
+ new->variant.compare.src2.tag = S390_OPND_AMODE;
+ new->variant.compare.src2.variant.am =
+ s390_amode_b12(spill_off, s390_hreg_guest_state_pointer());
+
+ return new;
+ }
+ }
+
+ if (insn->tag == S390_INSN_UNOP) {
+ if (insn->variant.unop.src.tag == S390_OPND_REG &&
+ insn->variant.unop.src.variant.reg == vreg) {
+ vassert(insn->variant.unop.dst != vreg);
+
+ /* Correct the spill-offset depending on how many bytes are loaded */
+ switch (insn->variant.unop.tag) {
+ case S390_SIGN_EXTEND_8:
+ case S390_ZERO_EXTEND_8:
+ spill_off += 7;
+ break;
+
+ case S390_SIGN_EXTEND_16:
+ case S390_ZERO_EXTEND_16:
+ spill_off += 6;
+ break;
+
+ case S390_SIGN_EXTEND_32:
+ case S390_ZERO_EXTEND_32:
+ spill_off += 4;
+ break;
+ }
+
+ new = s390_insn_copy(insn);
+ new->variant.unop.src.tag = S390_OPND_AMODE;
+ new->variant.unop.src.variant.am =
+ s390_amode_b12(spill_off, s390_hreg_guest_state_pointer());
+
+ return new;
+ }
+ }
+#endif
+
+ /* fixs390 (missed optimization) */
+ return NULL;
+}
+
+
+/* Emit an instruction into buf and return the number of bytes used.
+ Note that buf is not the insn's final place, and therefore it is
+ imperative to emit position-independent code. */
+Int
+emit_S390Instr(UChar *buf, Int nbuf, struct s390_insn *insn,
+ Bool mode64, void *dispatch)
+{
+ return s390_insn_emit(buf, nbuf, insn, dispatch);
+}
+
+
+HInstrArray *
+iselSB_S390(IRSB *bb, VexArch arch_host, VexArchInfo *archinfo_host,
+ VexAbiInfo *vbi)
+{
+ /* KLUDGE: export archinfo_host. */
+ s390_archinfo_host = archinfo_host;
+
+ return s390_isel_sb(bb, arch_host, archinfo_host, vbi);
+}
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_defs.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_defs.h
+++ valgrind/VEX/priv/host_s390_defs.h
@@ -0,0 +1,72 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#ifndef __VEX_HOST_S390_DEFS_H
+#define __VEX_HOST_S390_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h" /* VexAbiInfo */
+
+#include "main_util.h" /* vpanic (for host_generic_regs.h) */
+#include "host_generic_regs.h" /* HReg */
+
+struct s390_insn;
+struct s390_amode;
+
+/*--------------------------------------------------------*/
+/* --- Interface exposed to VEX --- */
+/*--------------------------------------------------------*/
+
+extern void ppS390AMode(struct s390_amode *);
+extern void ppS390Instr(struct s390_insn *, Bool mode64);
+extern void ppHRegS390(HReg);
+
+
+/* Some functions that insulate the register allocator from details
+ of the underlying instruction set. */
+extern void getRegUsage_S390Instr ( HRegUsage *, struct s390_insn *, Bool );
+extern void mapRegs_S390Instr ( HRegRemap *, struct s390_insn *, Bool );
+extern Bool isMove_S390Instr ( struct s390_insn *, HReg *, HReg * );
+extern Int emit_S390Instr ( UChar * buf, Int nbuf, struct s390_insn *,
+ Bool, void *dispatch );
+extern void getAllocableRegs_S390 ( Int *, HReg **, Bool );
+extern void genSpill_S390 ( HInstr **, HInstr **, HReg rreg, Int offset, Bool );
+extern void genReload_S390 ( HInstr **, HInstr **, HReg rreg, Int offset, Bool );
+extern struct s390_insn *directReload_S390 ( struct s390_insn *,
+ HReg vreg, Short spill_off );
+extern HInstrArray *iselSB_S390 ( IRSB *, VexArch, VexArchInfo *,
+ VexAbiInfo * );
+
+#endif /* ndef __VEX_HOST_S390_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_disasm.c
+++ valgrind/VEX/priv/host_s390_disasm.c
@@ -0,0 +1,452 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_disasm.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include <stdarg.h>
+#include "libvex_basictypes.h"
+#include "main_util.h" // vassert
+#include "main_globals.h" // vex_traceflags
+#include "host_s390_disasm.h"
+
+/* The format that is used to write out a mnemonic.
+ These should be declared as 'const HChar' but vex_printf needs
+ to be changed for that first */
+static HChar s390_mnm_fmt[] = "%-8s";
+
+
+/* Return the name of a gpr register for dis-assembly purposes. */
+static const HChar *
+gpr_operand(UInt archreg)
+{
+ static const HChar names[16][5] = {
+ "%r0", "%r1", "%r2", "%r3",
+ "%r4", "%r5", "%r6", "%r7",
+ "%r8", "%r9", "%r10", "%r11",
+ "%r12", "%r13", "%r14", "%r15",
+ };
+
+ vassert(archreg < 16);
+
+ return names[archreg];
+}
+
+
+/* Return the name of a fpr register for dis-assembly purposes. */
+static const HChar *
+fpr_operand(UInt archreg)
+{
+ static const HChar names[16][5] = {
+ "%f0", "%f1", "%f2", "%f3",
+ "%f4", "%f5", "%f6", "%f7",
+ "%f8", "%f9", "%f10", "%f11",
+ "%f12", "%f13", "%f14", "%f15",
+ };
+
+ vassert(archreg < 16);
+
+ return names[archreg];
+}
+
+
+/* Return the name of an ar register for dis-assembly purposes. */
+static const HChar *
+ar_operand(UInt archreg)
+{
+ static const HChar names[16][5] = {
+ "%a0", "%a1", "%a2", "%a3",
+ "%a4", "%a5", "%a6", "%a7",
+ "%a8", "%a9", "%a10", "%a11",
+ "%a12", "%a13", "%a14", "%a15",
+ };
+
+ vassert(archreg < 16);
+
+ return names[archreg];
+}
+
+
+/* Build and return the extended mnemonic for the compare and branch
+ opcodes as introduced by z10. See also the opcodes in file
+ opcodes/s390-opc.txt (from binutils) that have a '$' in their name. */
+static const HChar *
+cab_operand(const HChar *base, UInt mask)
+{
+ HChar *to;
+ const HChar *from;
+
+ static HChar buf[10]; /* Minimum is 6 + 2 */
+
+ static HChar *suffix[] = {
+ "", "h", "l", "ne", "e", "nl", "nh", ""
+ };
+
+ /* strcpy(buf, from); */
+ for (from = base, to = buf; *from; ++from, ++to) {
+ *to = *from;
+ }
+ /* strcat(buf, suffix); */
+ for (from = suffix[mask >> 1]; *from; ++from, ++to) {
+ *to = *from;
+ }
+ *to = '\0';
+
+ return buf;
+}
+
+
+/* Return the special mnemonic for the BCR opcode */
+static const HChar *
+bcr_operand(UInt m1)
+{
+ static const HChar mnemonic[16][6] = {
+ /* 0 */ "nopr", /* no operation */
+ /* 1 */ "bor", /* branch on overflow / if ones */
+ /* 2 */ "bhr", /* branch on high */
+ /* 3 */ "bnler", /* branch on not low or equal */
+ /* 4 */ "blr", /* branch on low */
+ /* 5 */ "bnher", /* branch on not high or equal */
+ /* 6 */ "blhr", /* branch on low or high */
+ /* 7 */ "bner", /* branch on not equal */
+ /* 8 */ "ber", /* branch on equal */
+ /* 9 */ "bnlhr", /* branch on not low or high */
+ /* a */ "bher", /* branch on high or equal */
+ /* b */ "bnlr", /* branch on not low */
+      /* c */ "bler",   /* branch on low or equal */
+ /* d */ "bnhr", /* branch on not high */
+ /* e */ "bnor", /* branch on not overflow / if not ones */
+ /* f */ "br", /* unconditional branch */
+ };
+
+ return mnemonic[m1];
+}
+
+
+/* Return the special mnemonic for the BC opcode */
+static const HChar *
+bc_operand(UInt m1)
+{
+ static const HChar mnemonic[16][5] = {
+ /* 0 */ "nop", // no operation
+ /* 1 */ "bo", // branch on overflow / if ones
+ /* 2 */ "bh", // branch on high
+ /* 3 */ "bnle", // branch on not low or equal
+ /* 4 */ "bl", // branch on low
+ /* 5 */ "bnhe", // branch on not high or equal
+ /* 6 */ "blh", // branch on low or high
+ /* 7 */ "bne", // branch on not equal
+ /* 8 */ "be", // branch on equal
+ /* 9 */ "bnlh", // branch on not low or high
+ /* a */ "bhe", // branch on high or equal
+ /* b */ "bnl", // branch on not low
+ /* c */ "ble", // branch on low or equal
+ /* d */ "bnh", // branch on not high
+ /* e */ "bno", // branch on not overflow / if not ones
+ /* f */ "b" // unconditional branch
+ };
+
+ return mnemonic[m1];
+}
+
+
+/* Return the special mnemonic for the BRC opcode */
+static const HChar *
+brc_operand(UInt m1)
+{
+ static const HChar mnemonic[16][5] = {
+ /* 0 */ "brc", /* no special mnemonic */
+ /* 1 */ "jo", /* jump on overflow / if ones */
+ /* 2 */ "jh", /* jump on A high */
+ /* 3 */ "jnle", /* jump on not low or equal */
+ /* 4 */ "jl", /* jump on A low */
+ /* 5 */ "jnhe", /* jump on not high or equal */
+ /* 6 */ "jlh", /* jump on low or high */
+ /* 7 */ "jne", /* jump on A not equal B */
+ /* 8 */ "je", /* jump on A equal B */
+ /* 9 */ "jnlh", /* jump on not low or high */
+ /* a */ "jhe", /* jump on high or equal */
+ /* b */ "jnl", /* jump on A not low */
+ /* c */ "jle", /* jump on low or equal */
+ /* d */ "jnh", /* jump on A not high */
+ /* e */ "jno", /* jump on not overflow / if not ones */
+ /* f */ "j", /* jump */
+ };
+
+ return mnemonic[m1];
+}
+
+
+/* Return the special mnemonic for the BRCL opcode */
+static const HChar *
+brcl_operand(UInt m1)
+{
+ static const HChar mnemonic[16][6] = {
+ /* 0 */ "brcl", /* no special mnemonic */
+ /* 1 */ "jgo", /* jump long on overflow / if ones */
+ /* 2 */ "jgh", /* jump long on high */
+ /* 3 */ "jgnle", /* jump long on not low or equal */
+ /* 4 */ "jgl", /* jump long on low */
+ /* 5 */ "jgnhe", /* jump long on not high or equal */
+ /* 6 */ "jglh", /* jump long on low or high */
+ /* 7 */ "jgne", /* jump long on not equal */
+ /* 8 */ "jge", /* jump long on equal */
+ /* 9 */ "jgnlh", /* jump long on not low or high */
+ /* a */ "jghe", /* jump long on high or equal */
+ /* b */ "jgnl", /* jump long on not low */
+ /* c */ "jgle", /* jump long on low or equal */
+ /* d */ "jgnh", /* jump long on not high */
+ /* e */ "jgno", /* jump long on not overflow / if not ones */
+ /* f */ "jg", /* jump long */
+ };
+
+ return mnemonic[m1];
+}
+
+
+/* An operand with a base register, an index register, and a displacement.
+   If the displacement is signed, the rightmost 20 bits of D need to be
+ sign extended */
+static HChar *
+dxb_operand(HChar *p, UInt d, UInt x, UInt b, Bool displacement_is_signed)
+{
+ if (displacement_is_signed) {
+ Int displ = ((Int)d << 12) >> 12; /* sign extend */
+
+ p += vex_sprintf(p, "%d", displ);
+ } else {
+ p += vex_sprintf(p, "%u", d);
+ }
+ if (x != 0) {
+ p += vex_sprintf(p, "(%s", gpr_operand(x));
+ if (b != 0) {
+ p += vex_sprintf(p, ",%s", gpr_operand(b));
+ }
+ p += vex_sprintf(p, ")");
+ } else {
+ if (b != 0) {
+ p += vex_sprintf(p, "(%s)", gpr_operand(b));
+ }
+ }
+
+ return p;
+}
+
+
+/* An operand with base register, unsigned length, and a 12-bit
+ unsigned displacement */
+static HChar *
+udlb_operand(HChar *p, UInt d, UInt length, UInt b)
+{
+ p += vex_sprintf(p, "%u", d);
+ p += vex_sprintf(p, "(%u", length + 1); // actual length is +1
+ if (b != 0) {
+ p += vex_sprintf(p, ",%s", gpr_operand(b));
+ }
+ p += vex_sprintf(p, ")");
+
+ return p;
+}
+
+
+/* The first argument is the command that says how to write the disassembled
+ insn. It is understood that the mnemonic comes first and that arguments
+ are separated by a ','. The command holds the arguments. Each argument is
+ encoded using a 4-bit S390_ARG_xyz value. The first argument is placed
+ in the least significant bits of the command and so on. There are at most
+   6 arguments in an insn and a sentinel (S390_ARG_DONE) is needed to identify
+   the end of the argument list. 7 * 4 = 28 bits are required for the
+ command. */
+void
+s390_disasm(UInt command, ...)
+{
+ va_list args;
+ unsigned argkind;
+ HChar buf[128]; /* holds the disassembled insn */
+ HChar *p;
+ HChar separator;
+
+ va_start(args, command);
+
+ p = buf;
+ separator = 0;
+
+ while (42) {
+ argkind = command & 0xF;
+ command >>= 4;
+
+ if (argkind == S390_ARG_DONE) goto done;
+
+ if (argkind == S390_ARG_CABM) separator = 0; /* optional */
+
+ /* Write out the separator */
+ if (separator) *p++ = separator;
+
+ /* argument */
+ switch (argkind) {
+ case S390_ARG_MNM:
+ p += vex_sprintf(p, s390_mnm_fmt, va_arg(args, HChar *));
+ separator = ' ';
+ continue;
+
+ case S390_ARG_XMNM: {
+ UInt mask, kind;
+ const HChar *mnm;
+
+ kind = va_arg(args, UInt);
+
+ separator = ' ';
+ switch (kind) {
+ case S390_XMNM_BC:
+ case S390_XMNM_BCR:
+ mask = va_arg(args, UInt);
+ mnm = kind == S390_XMNM_BCR ? bcr_operand(mask) : bc_operand(mask);
+ p += vex_sprintf(p, s390_mnm_fmt, mnm);
+ /* mask == 0 is a NOP and has no argument */
+ if (mask == 0) goto done;
+ break;
+
+ case S390_XMNM_BRC:
+ case S390_XMNM_BRCL:
+ mask = va_arg(args, UInt);
+ mnm = kind == S390_XMNM_BRC ? brc_operand(mask) : brcl_operand(mask);
+ p += vex_sprintf(p, s390_mnm_fmt, mnm);
+
+ /* mask == 0 has no special mnemonic */
+ if (mask == 0) {
+ p += vex_sprintf(p, " 0");
+ separator = ',';
+ }
+ break;
+
+ case S390_XMNM_CAB:
+ mnm = va_arg(args, HChar *);
+ mask = va_arg(args, UInt);
+ p += vex_sprintf(p, s390_mnm_fmt, cab_operand(mnm, mask));
+ break;
+ }
+ }
+ continue;
+
+ case S390_ARG_GPR:
+ p += vex_sprintf(p, "%s", gpr_operand(va_arg(args, UInt)));
+ break;
+
+ case S390_ARG_FPR:
+ p += vex_sprintf(p, "%s", fpr_operand(va_arg(args, UInt)));
+ break;
+
+ case S390_ARG_AR:
+ p += vex_sprintf(p, "%s", ar_operand(va_arg(args, UInt)));
+ break;
+
+ case S390_ARG_UINT:
+ p += vex_sprintf(p, "%u", va_arg(args, UInt));
+ break;
+
+ case S390_ARG_INT:
+ p += vex_sprintf(p, "%d", (Int)(va_arg(args, UInt)));
+ break;
+
+ case S390_ARG_PCREL: {
+ Int offset = (Int)(va_arg(args, UInt));
+
+ /* Convert # halfwords to # bytes */
+ offset <<= 1;
+
+ if (offset < 0) {
+ p += vex_sprintf(p, ".%d", offset);
+ } else {
+ p += vex_sprintf(p, ".+%u", offset);
+ }
+ break;
+ }
+
+ case S390_ARG_SDXB: {
+ UInt dh, dl, x, b;
+
+ dh = va_arg(args, UInt);
+ dl = va_arg(args, UInt);
+ x = va_arg(args, UInt);
+ b = va_arg(args, UInt);
+
+ p = dxb_operand(p, (dh << 12) | dl, x, b, 1 /* signed_displacement */);
+ break;
+ }
+
+ case S390_ARG_UDXB: {
+ UInt d, x, b;
+
+ d = va_arg(args, UInt);
+ x = va_arg(args, UInt);
+ b = va_arg(args, UInt);
+
+ p = dxb_operand(p, d, x, b, 0 /* signed_displacement */);
+ break;
+ }
+
+ case S390_ARG_UDLB: {
+ UInt d, l, b;
+
+ d = va_arg(args, UInt);
+ l = va_arg(args, UInt);
+ b = va_arg(args, UInt);
+
+ p = udlb_operand(p, d, l, b);
+ break;
+ }
+
+ case S390_ARG_CABM: {
+ UInt mask;
+
+ mask = va_arg(args, UInt) & 0xE;
+ if (mask == 0 || mask == 14) {
+ p += vex_sprintf(p, ",%u", mask);
+ }
+ break;
+ }
+ }
+
+ separator = ',';
+ }
+
+ done:
+ va_end(args);
+
+ *p = '\0';
+
+ vassert(p < buf + sizeof buf); /* detect buffer overwrite */
+
+ /* Finally, write out the disassembled insn */
+ vex_printf("%s\n", buf);
+}
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_disasm.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_disasm.h
+++ valgrind/VEX/priv/host_s390_disasm.h
@@ -0,0 +1,86 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_disasm.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_HOST_S390_DISASM_H
+#define __VEX_HOST_S390_DISASM_H
+
+#include "libvex_basictypes.h"
+
+/* Macros to encode a command for s390_disasm. */
+#undef P
+#define P(a) (S390_ARG_##a)
+#undef ENC1
+#define ENC1(a) ((P(DONE) << 4) | P(a))
+#undef ENC2
+#define ENC2(a,b) ((P(DONE) << 8) | (P(b) << 4) | P(a))
+#undef ENC3
+#define ENC3(a,b,c) ((P(DONE) << 12) | (P(c) << 8) | (P(b) << 4) | P(a))
+#undef ENC4
+#define ENC4(a,b,c,d) ((P(DONE) << 16) | (P(d) << 12) | (P(c) << 8) | \
+ (P(b) << 4) | P(a))
+#undef ENC5
+#define ENC5(a,b,c,d,e) ((P(DONE) << 20) | (P(e) << 16) | (P(d) << 12) | \
+ (P(c) << 8) | (P(b) << 4) | P(a))
+#undef ENC6
+#define ENC6(a,b,c,d,e,f) ((P(DONE) << 24) | (P(f) << 20) | (P(e) << 16) | \
+ (P(d) << 12) | (P(c) << 8) | (P(b) << 4) | P(a))
+
+/* The different kinds of operands in an asm insn */
+enum {
+ S390_ARG_DONE = 0,
+ S390_ARG_GPR = 1,
+ S390_ARG_FPR = 2,
+ S390_ARG_AR = 3,
+ S390_ARG_INT = 4,
+ S390_ARG_UINT = 5,
+ S390_ARG_PCREL = 6,
+ S390_ARG_SDXB = 7,
+ S390_ARG_UDXB = 8,
+ S390_ARG_UDLB = 9,
+ S390_ARG_CABM = 10,
+ S390_ARG_MNM = 11,
+ S390_ARG_XMNM = 12
+};
+
+/* The different kinds of extended mnemonics */
+enum {
+ S390_XMNM_CAB = 0,
+ S390_XMNM_BCR = 1,
+ S390_XMNM_BC = 2,
+ S390_XMNM_BRC = 3,
+ S390_XMNM_BRCL = 4
+};
+
+void s390_disasm(UInt command, ...);
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_disasm.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __VEX_HOST_S390_DISASM_H */
--- valgrind/VEX/priv/host_s390_emit.c
+++ valgrind/VEX/priv/host_s390_emit.c
@@ -0,0 +1,2376 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_emit.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "main_globals.h"
+
+#include "host_s390_disasm.h"
+#include "host_s390_emit.h"
+
+#undef likely
+#undef unlikely
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+/*------------------------------------------------------------*/
+/*--- Functions to emit a sequence of bytes ---*/
+/*------------------------------------------------------------*/
+
+
+static __inline__ UChar *
+emit_2bytes(UChar *p, ULong val)
+{
+ return (UChar *)__builtin_memcpy(p, ((UChar *)&val) + 6, 2) + 2;
+}
+
+
+static __inline__ UChar *
+emit_4bytes(UChar *p, ULong val)
+{
+ return (UChar *)__builtin_memcpy(p, ((UChar *)&val) + 4, 4) + 4;
+}
+
+
+static __inline__ UChar *
+emit_6bytes(UChar *p, ULong val)
+{
+ return (UChar *)__builtin_memcpy(p, ((UChar *)&val) + 2, 6) + 6;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Functions to emit instructions of the various formats ---*/
+/*------------------------------------------------------------*/
+
+
+static UChar *
+emit_RI(UChar *p, UInt op, UChar r1, UShort i2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 20;
+ the_insn |= ((ULong)i2) << 0;
+
+ return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RIL(UChar *p, ULong op, UChar r1, UInt i2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 36;
+ the_insn |= ((ULong)i2) << 0;
+
+ return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RR(UChar *p, UInt op, UChar r1, UChar r2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 4;
+ the_insn |= ((ULong)r2) << 0;
+
+ return emit_2bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRE(UChar *p, UInt op, UChar r1, UChar r2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 4;
+ the_insn |= ((ULong)r2) << 0;
+
+ return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRF(UChar *p, UInt op, UChar r1, UChar r3, UChar r2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 12;
+ the_insn |= ((ULong)r3) << 4;
+ the_insn |= ((ULong)r2) << 0;
+
+ return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRF3(UChar *p, UInt op, UChar r3, UChar r1, UChar r2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r3) << 12;
+ the_insn |= ((ULong)r1) << 4;
+ the_insn |= ((ULong)r2) << 0;
+
+ return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RS(UChar *p, UInt op, UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 20;
+ the_insn |= ((ULong)r3) << 16;
+ the_insn |= ((ULong)b2) << 12;
+ the_insn |= ((ULong)d2) << 0;
+
+ return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RSY(UChar *p, ULong op, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 36;
+ the_insn |= ((ULong)r3) << 32;
+ the_insn |= ((ULong)b2) << 28;
+ the_insn |= ((ULong)dl2) << 16;
+ the_insn |= ((ULong)dh2) << 8;
+
+ return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RX(UChar *p, UInt op, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 20;
+ the_insn |= ((ULong)x2) << 16;
+ the_insn |= ((ULong)b2) << 12;
+ the_insn |= ((ULong)d2) << 0;
+
+ return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RXE(UChar *p, ULong op, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 36;
+ the_insn |= ((ULong)x2) << 32;
+ the_insn |= ((ULong)b2) << 28;
+ the_insn |= ((ULong)d2) << 16;
+
+ return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RXY(UChar *p, ULong op, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)r1) << 36;
+ the_insn |= ((ULong)x2) << 32;
+ the_insn |= ((ULong)b2) << 28;
+ the_insn |= ((ULong)dl2) << 16;
+ the_insn |= ((ULong)dh2) << 8;
+
+ return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_S(UChar *p, UInt op, UChar b2, UShort d2)
+{
+ ULong the_insn = op;
+
+ the_insn |= ((ULong)b2) << 12;
+ the_insn |= ((ULong)d2) << 0;
+
+ return emit_4bytes(p, the_insn);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Functions to emit particular instructions ---*/
+/*------------------------------------------------------------*/
+
+
+UChar *
+s390_emit_AR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "ar", r1, r2);
+
+ return emit_RR(p, 0x1a00, r1, r2);
+}
+
+
+UChar *
+s390_emit_AGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "agr", r1, r2);
+
+ return emit_RRE(p, 0xb9080000, r1, r2);
+}
+
+
+UChar *
+s390_emit_A(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "a", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x5a000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_AY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ay", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000005aULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_AG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ag", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000008ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_AFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "afi", r1, i2);
+
+ return emit_RIL(p, 0xc20900000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_AGFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "agfi", r1, i2);
+
+ return emit_RIL(p, 0xc20800000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_AH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "ah", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x4a000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_AHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ahy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000007aULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_AHI(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "ahi", r1, (Int)(Short)i2);
+
+ return emit_RI(p, 0xa70a0000, r1, i2);
+}
+
+
+UChar *
+s390_emit_AGHI(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "aghi", r1, (Int)(Short)i2);
+
+ return emit_RI(p, 0xa70b0000, r1, i2);
+}
+
+
+UChar *
+s390_emit_NR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "nr", r1, r2);
+
+ return emit_RR(p, 0x1400, r1, r2);
+}
+
+
+UChar *
+s390_emit_NGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "ngr", r1, r2);
+
+ return emit_RRE(p, 0xb9800000, r1, r2);
+}
+
+
+UChar *
+s390_emit_N(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "n", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x54000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_NY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ny", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000054ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_NG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ng", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000080ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_NIHF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "nihf", r1, i2);
+
+ return emit_RIL(p, 0xc00a00000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_NILF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "nilf", r1, i2);
+
+ return emit_RIL(p, 0xc00b00000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_NILL(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "nill", r1, i2);
+
+ return emit_RI(p, 0xa5070000, r1, i2);
+}
+
+
+UChar *
+s390_emit_BASR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "basr", r1, r2);
+
+ return emit_RR(p, 0x0d00, r1, r2);
+}
+
+
+UChar *
+s390_emit_BCR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(XMNM, GPR), S390_XMNM_BCR, r1, r2);
+
+ return emit_RR(p, 0x0700, r1, r2);
+}
+
+
+UChar *
+s390_emit_BC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(XMNM, UDXB), S390_XMNM_BC, r1, d2, x2, b2);
+
+ return emit_RX(p, 0x47000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_BRC(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(XMNM, PCREL), S390_XMNM_BRC, r1, (Int)(Short)i2);
+
+ return emit_RI(p, 0xa7040000, r1, i2);
+}
+
+
+UChar *
+s390_emit_CR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "cr", r1, r2);
+
+ return emit_RR(p, 0x1900, r1, r2);
+}
+
+
+UChar *
+s390_emit_CGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "cgr", r1, r2);
+
+ return emit_RRE(p, 0xb9200000, r1, r2);
+}
+
+
+UChar *
+s390_emit_C(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "c", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x59000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_CY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "cy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000059ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_CG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "cg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000020ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_CFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "cfi", r1, i2);
+
+ return emit_RIL(p, 0xc20d00000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_CS(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, GPR, UDXB), "cs", r1, r3, d2, 0, b2);
+
+ return emit_RS(p, 0xba000000, r1, r3, b2, d2);
+}
+
+
+UChar *
+s390_emit_CSY(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "csy", r1, r3, dh2, dl2, 0, b2);
+
+ return emit_RSY(p, 0xeb0000000014ULL, r1, r3, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_CSG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "csg", r1, r3, dh2, dl2, 0, b2);
+
+ return emit_RSY(p, 0xeb0000000030ULL, r1, r3, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_CLR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "clr", r1, r2);
+
+ return emit_RR(p, 0x1500, r1, r2);
+}
+
+
+UChar *
+s390_emit_CLGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "clgr", r1, r2);
+
+ return emit_RRE(p, 0xb9210000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CL(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "cl", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x55000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_CLY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "cly", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000055ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_CLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "clg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000021ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_CLFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "clfi", r1, i2);
+
+ return emit_RIL(p, 0xc20f00000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_DR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "dr", r1, r2);
+
+ return emit_RR(p, 0x1d00, r1, r2);
+}
+
+
+UChar *
+s390_emit_D(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "d", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x5d000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_DLR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "dlr", r1, r2);
+
+ return emit_RRE(p, 0xb9970000, r1, r2);
+}
+
+
+UChar *
+s390_emit_DLGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "dlgr", r1, r2);
+
+ return emit_RRE(p, 0xb9870000, r1, r2);
+}
+
+
+UChar *
+s390_emit_DL(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "dl", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000097ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_DLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "dlg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000087ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_DSGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "dsgr", r1, r2);
+
+ return emit_RRE(p, 0xb90d0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_DSG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "dsg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000000dULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_XR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "xr", r1, r2);
+
+ return emit_RR(p, 0x1700, r1, r2);
+}
+
+
+UChar *
+s390_emit_XGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "xgr", r1, r2);
+
+ return emit_RRE(p, 0xb9820000, r1, r2);
+}
+
+
+UChar *
+s390_emit_X(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "x", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x57000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_XY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "xy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000057ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_XG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "xg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000082ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_XIHF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "xihf", r1, i2);
+
+ return emit_RIL(p, 0xc00600000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_XILF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "xilf", r1, i2);
+
+ return emit_RIL(p, 0xc00700000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_FLOGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "flogr", r1, r2);
+
+ return emit_RRE(p, 0xb9830000, r1, r2);
+}
+
+
+UChar *
+s390_emit_IC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "ic", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x43000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_ICY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "icy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000073ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_IIHF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "iihf", r1, i2);
+
+ return emit_RIL(p, 0xc00800000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_IIHH(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "iihh", r1, i2);
+
+ return emit_RI(p, 0xa5000000, r1, i2);
+}
+
+
+UChar *
+s390_emit_IIHL(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "iihl", r1, i2);
+
+ return emit_RI(p, 0xa5010000, r1, i2);
+}
+
+
+UChar *
+s390_emit_IILF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "iilf", r1, i2);
+
+ return emit_RIL(p, 0xc00900000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_IILH(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "iilh", r1, i2);
+
+ return emit_RI(p, 0xa5020000, r1, i2);
+}
+
+
+UChar *
+s390_emit_IILL(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "iill", r1, i2);
+
+ return emit_RI(p, 0xa5030000, r1, i2);
+}
+
+
+UChar *
+s390_emit_IPM(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(MNM, GPR), "ipm", r1);
+
+ return emit_RRE(p, 0xb2220000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lr", r1, r2);
+
+ return emit_RR(p, 0x1800, r1, r2);
+}
+
+
+UChar *
+s390_emit_LGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lgr", r1, r2);
+
+ return emit_RRE(p, 0xb9040000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LGFR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lgfr", r1, r2);
+
+ return emit_RRE(p, 0xb9140000, r1, r2);
+}
+
+
+UChar *
+s390_emit_L(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "l", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x58000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_LY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ly", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000058ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "lg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000004ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LGF(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "lgf", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000014ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LGFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "lgfi", r1, i2);
+
+ return emit_RIL(p, 0xc00100000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_LTR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "ltr", r1, r2);
+
+ return emit_RR(p, 0x1200, r1, r2);
+}
+
+
+UChar *
+s390_emit_LTGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "ltgr", r1, r2);
+
+ return emit_RRE(p, 0xb9020000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LT(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "lt", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000012ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LTG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ltg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000002ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lbr", r1, r2);
+
+ return emit_RRE(p, 0xb9260000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LGBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lgbr", r1, r2);
+
+ return emit_RRE(p, 0xb9060000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LB(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "lb", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000076ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LGB(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "lgb", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000077ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LCR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lcr", r1, r2);
+
+ return emit_RR(p, 0x1300, r1, r2);
+}
+
+
+UChar *
+s390_emit_LCGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lcgr", r1, r2);
+
+ return emit_RRE(p, 0xb9030000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LHR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lhr", r1, r2);
+
+ return emit_RRE(p, 0xb9270000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LGHR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "lghr", r1, r2);
+
+ return emit_RRE(p, 0xb9070000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "lh", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x48000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_LHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "lhy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000078ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LGH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "lgh", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000015ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LHI(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "lhi", r1, (Int)(Short)i2);
+
+ return emit_RI(p, 0xa7080000, r1, i2);
+}
+
+
+UChar *
+s390_emit_LGHI(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "lghi", r1, (Int)(Short)i2);
+
+ return emit_RI(p, 0xa7090000, r1, i2);
+}
+
+
+UChar *
+s390_emit_LLGFR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "llgfr", r1, r2);
+
+ return emit_RRE(p, 0xb9160000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LLGF(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "llgf", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000016ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LLCR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "llcr", r1, r2);
+
+ return emit_RRE(p, 0xb9940000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LLGCR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "llgcr", r1, r2);
+
+ return emit_RRE(p, 0xb9840000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LLC(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "llc", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000094ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LLGC(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "llgc", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000090ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LLHR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "llhr", r1, r2);
+
+ return emit_RRE(p, 0xb9950000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LLGHR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "llghr", r1, r2);
+
+ return emit_RRE(p, 0xb9850000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LLH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "llh", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000095ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LLGH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "llgh", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000091ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LLILF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "llilf", r1, i2);
+
+ return emit_RIL(p, 0xc00f00000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_LLILH(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "llilh", r1, i2);
+
+ return emit_RI(p, 0xa50e0000, r1, i2);
+}
+
+
+UChar *
+s390_emit_LLILL(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "llill", r1, i2);
+
+ return emit_RI(p, 0xa50f0000, r1, i2);
+}
+
+
+UChar *
+s390_emit_MR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "mr", r1, r2);
+
+ return emit_RR(p, 0x1c00, r1, r2);
+}
+
+
+UChar *
+s390_emit_M(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "m", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x5c000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_MFY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "mfy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000005cULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_MH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "mh", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x4c000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_MHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "mhy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000007cULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_MHI(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "mhi", r1, (Int)(Short)i2);
+
+ return emit_RI(p, 0xa70c0000, r1, i2);
+}
+
+
+UChar *
+s390_emit_MLR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "mlr", r1, r2);
+
+ return emit_RRE(p, 0xb9960000, r1, r2);
+}
+
+
+UChar *
+s390_emit_MLGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "mlgr", r1, r2);
+
+ return emit_RRE(p, 0xb9860000, r1, r2);
+}
+
+
+UChar *
+s390_emit_ML(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "ml", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000096ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_MLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "mlg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000086ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_MSR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "msr", r1, r2);
+
+ return emit_RRE(p, 0xb2520000, r1, r2);
+}
+
+
+UChar *
+s390_emit_MSGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "msgr", r1, r2);
+
+ return emit_RRE(p, 0xb90c0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_MS(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "ms", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x71000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_MSY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "msy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000051ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_MSG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "msg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000000cULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_MSFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "msfi", r1, i2);
+
+ return emit_RIL(p, 0xc20100000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_MSGFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, INT), "msgfi", r1, i2);
+
+ return emit_RIL(p, 0xc20000000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_OR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "or", r1, r2);
+
+ return emit_RR(p, 0x1600, r1, r2);
+}
+
+
+UChar *
+s390_emit_OGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "ogr", r1, r2);
+
+ return emit_RRE(p, 0xb9810000, r1, r2);
+}
+
+
+UChar *
+s390_emit_O(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "o", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x56000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_OY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "oy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000056ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_OG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "og", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000081ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_OIHF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "oihf", r1, i2);
+
+ return emit_RIL(p, 0xc00c00000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_OILF(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "oilf", r1, i2);
+
+ return emit_RIL(p, 0xc00d00000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_OILL(UChar *p, UChar r1, UShort i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "oill", r1, i2);
+
+ return emit_RI(p, 0xa50b0000, r1, i2);
+}
+
+
+UChar *
+s390_emit_SLL(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "sll", r1, d2, 0, b2);
+
+ return emit_RS(p, 0x89000000, r1, r3, b2, d2);
+}
+
+
+UChar *
+s390_emit_SLLG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "sllg", r1, r3, dh2, dl2, 0, b2);
+
+ return emit_RSY(p, 0xeb000000000dULL, r1, r3, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_SRA(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "sra", r1, d2, 0, b2);
+
+ return emit_RS(p, 0x8a000000, r1, r3, b2, d2);
+}
+
+
+UChar *
+s390_emit_SRAG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "srag", r1, r3, dh2, dl2, 0, b2);
+
+ return emit_RSY(p, 0xeb000000000aULL, r1, r3, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_SRL(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "srl", r1, d2, 0, b2);
+
+ return emit_RS(p, 0x88000000, r1, r3, b2, d2);
+}
+
+
+UChar *
+s390_emit_SRLG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "srlg", r1, r3, dh2, dl2, 0, b2);
+
+ return emit_RSY(p, 0xeb000000000cULL, r1, r3, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_ST(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "st", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x50000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_STY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "sty", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000050ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_STG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "stg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000024ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_STC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "stc", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x42000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_STCY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "stcy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000072ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_STH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "sth", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x40000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_STHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "sthy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000070ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_SR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "sr", r1, r2);
+
+ return emit_RR(p, 0x1b00, r1, r2);
+}
+
+
+UChar *
+s390_emit_SGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, GPR), "sgr", r1, r2);
+
+ return emit_RRE(p, 0xb9090000, r1, r2);
+}
+
+
+UChar *
+s390_emit_S(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "s", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x5b000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_SY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "sy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000005bULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_SG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "sg", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe30000000009ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_SH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UDXB), "sh", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x4b000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_SHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, SDXB), "shy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xe3000000007bULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_SLFI(UChar *p, UChar r1, UInt i2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, UINT), "slfi", r1, i2);
+
+ return emit_RIL(p, 0xc20500000000ULL, r1, i2);
+}
+
+
+UChar *
+s390_emit_LDR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "ldr", r1, r2);
+
+ return emit_RR(p, 0x2800, r1, r2);
+}
+
+
+UChar *
+s390_emit_LE(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, UDXB), "le", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x78000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_LD(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, UDXB), "ld", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x68000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_LEY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, SDXB), "ley", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xed0000000064ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LDY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, SDXB), "ldy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xed0000000065ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_LFPC(UChar *p, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(MNM, UDXB), "lfpc", d2, 0, b2);
+
+ return emit_S(p, 0xb29d0000, b2, d2);
+}
+
+
+UChar *
+s390_emit_LDGR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, GPR), "ldgr", r1, r2);
+
+ return emit_RRE(p, 0xb3c10000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LGDR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, GPR, FPR), "lgdr", r1, r2);
+
+ return emit_RRE(p, 0xb3cd0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LZER(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(MNM, FPR), "lzer", r1);
+
+ return emit_RRE(p, 0xb3740000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LZDR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(MNM, FPR), "lzdr", r1);
+
+ return emit_RRE(p, 0xb3750000, r1, r2);
+}
+
+
+UChar *
+s390_emit_SFPC(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(MNM, GPR), "sfpc", r1);
+
+ return emit_RRE(p, 0xb3840000, r1, r2);
+}
+
+
+UChar *
+s390_emit_STE(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, UDXB), "ste", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x70000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_STD(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, UDXB), "std", r1, d2, x2, b2);
+
+ return emit_RX(p, 0x60000000, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_STEY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, SDXB), "stey", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xed0000000066ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_STDY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, SDXB), "stdy", r1, dh2, dl2, x2, b2);
+
+ return emit_RXY(p, 0xed0000000067ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+UChar *
+s390_emit_STFPC(UChar *p, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC2(MNM, UDXB), "stfpc", d2, 0, b2);
+
+ return emit_S(p, 0xb29c0000, b2, d2);
+}
+
+
+UChar *
+s390_emit_AEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "aebr", r1, r2);
+
+ return emit_RRE(p, 0xb30a0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_ADBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "adbr", r1, r2);
+
+ return emit_RRE(p, 0xb31a0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_AXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "axbr", r1, r2);
+
+ return emit_RRE(p, 0xb34a0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "cebr", r1, r2);
+
+ return emit_RRE(p, 0xb3090000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "cdbr", r1, r2);
+
+ return emit_RRE(p, 0xb3190000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "cxbr", r1, r2);
+
+ return emit_RRE(p, 0xb3490000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CEFBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, GPR), "cefbr", r1, r2);
+
+ return emit_RRE(p, 0xb3940000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CDFBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, GPR), "cdfbr", r1, r2);
+
+ return emit_RRE(p, 0xb3950000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CXFBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, GPR), "cxfbr", r1, r2);
+
+ return emit_RRE(p, 0xb3960000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CEGBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, GPR), "cegbr", r1, r2);
+
+ return emit_RRE(p, 0xb3a40000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CDGBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, GPR), "cdgbr", r1, r2);
+
+ return emit_RRE(p, 0xb3a50000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CXGBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, GPR), "cxgbr", r1, r2);
+
+ return emit_RRE(p, 0xb3a60000, r1, r2);
+}
+
+
+UChar *
+s390_emit_CFEBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cfebr", r1, r3, r2);
+
+ return emit_RRF3(p, 0xb3980000, r3, r1, r2);
+}
+
+
+UChar *
+s390_emit_CFDBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cfdbr", r1, r3, r2);
+
+ return emit_RRF3(p, 0xb3990000, r3, r1, r2);
+}
+
+
+UChar *
+s390_emit_CFXBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cfxbr", r1, r3, r2);
+
+ return emit_RRF3(p, 0xb39a0000, r3, r1, r2);
+}
+
+
+UChar *
+s390_emit_CGEBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgebr", r1, r3, r2);
+
+ return emit_RRF3(p, 0xb3a80000, r3, r1, r2);
+}
+
+
+UChar *
+s390_emit_CGDBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgdbr", r1, r3, r2);
+
+ return emit_RRF3(p, 0xb3a90000, r3, r1, r2);
+}
+
+
+UChar *
+s390_emit_CGXBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgxbr", r1, r3, r2);
+
+ return emit_RRF3(p, 0xb3aa0000, r3, r1, r2);
+}
+
+
+UChar *
+s390_emit_DEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "debr", r1, r2);
+
+ return emit_RRE(p, 0xb30d0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_DDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "ddbr", r1, r2);
+
+ return emit_RRE(p, 0xb31d0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_DXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "dxbr", r1, r2);
+
+ return emit_RRE(p, 0xb34d0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LCEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lcebr", r1, r2);
+
+ return emit_RRE(p, 0xb3030000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LCDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lcdbr", r1, r2);
+
+ return emit_RRE(p, 0xb3130000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LCXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lcxbr", r1, r2);
+
+ return emit_RRE(p, 0xb3430000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LDEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "ldebr", r1, r2);
+
+ return emit_RRE(p, 0xb3040000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LXDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lxdbr", r1, r2);
+
+ return emit_RRE(p, 0xb3050000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LXEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lxebr", r1, r2);
+
+ return emit_RRE(p, 0xb3060000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LXDB(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, UDXB), "lxdb", r1, d2, x2, b2);
+
+ return emit_RXE(p, 0xed0000000005ULL, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_LXEB(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, UDXB), "lxeb", r1, d2, x2, b2);
+
+ return emit_RXE(p, 0xed0000000006ULL, r1, x2, b2, d2);
+}
+
+
+UChar *
+s390_emit_LNEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lnebr", r1, r2);
+
+ return emit_RRE(p, 0xb3010000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LNDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lndbr", r1, r2);
+
+ return emit_RRE(p, 0xb3110000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LNXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lnxbr", r1, r2);
+
+ return emit_RRE(p, 0xb3410000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LPEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lpebr", r1, r2);
+
+ return emit_RRE(p, 0xb3000000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LPDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lpdbr", r1, r2);
+
+ return emit_RRE(p, 0xb3100000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LPXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lpxbr", r1, r2);
+
+ return emit_RRE(p, 0xb3400000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LEDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "ledbr", r1, r2);
+
+ return emit_RRE(p, 0xb3440000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LDXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "ldxbr", r1, r2);
+
+ return emit_RRE(p, 0xb3450000, r1, r2);
+}
+
+
+UChar *
+s390_emit_LEXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "lexbr", r1, r2);
+
+ return emit_RRE(p, 0xb3460000, r1, r2);
+}
+
+
+UChar *
+s390_emit_MEEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "meebr", r1, r2);
+
+ return emit_RRE(p, 0xb3170000, r1, r2);
+}
+
+
+UChar *
+s390_emit_MDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "mdbr", r1, r2);
+
+ return emit_RRE(p, 0xb31c0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_MXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "mxbr", r1, r2);
+
+ return emit_RRE(p, 0xb34c0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_MAEBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, FPR, FPR, FPR), "maebr", r1, r3, r2);
+
+ return emit_RRF(p, 0xb30e0000, r1, r3, r2);
+}
+
+
+UChar *
+s390_emit_MADBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, FPR, FPR, FPR), "madbr", r1, r3, r2);
+
+ return emit_RRF(p, 0xb31e0000, r1, r3, r2);
+}
+
+
+UChar *
+s390_emit_MSEBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, FPR, FPR, FPR), "msebr", r1, r3, r2);
+
+ return emit_RRF(p, 0xb30f0000, r1, r3, r2);
+}
+
+
+UChar *
+s390_emit_MSDBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC4(MNM, FPR, FPR, FPR), "msdbr", r1, r3, r2);
+
+ return emit_RRF(p, 0xb31f0000, r1, r3, r2);
+}
+
+
+UChar *
+s390_emit_SQEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "sqebr", r1, r2);
+
+ return emit_RRE(p, 0xb3140000, r1, r2);
+}
+
+
+UChar *
+s390_emit_SQDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "sqdbr", r1, r2);
+
+ return emit_RRE(p, 0xb3150000, r1, r2);
+}
+
+
+UChar *
+s390_emit_SQXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "sqxbr", r1, r2);
+
+ return emit_RRE(p, 0xb3160000, r1, r2);
+}
+
+
+UChar *
+s390_emit_SEBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "sebr", r1, r2);
+
+ return emit_RRE(p, 0xb30b0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_SDBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "sdbr", r1, r2);
+
+ return emit_RRE(p, 0xb31b0000, r1, r2);
+}
+
+
+UChar *
+s390_emit_SXBR(UChar *p, UChar r1, UChar r2)
+{
+ if (unlikely(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC3(MNM, FPR, FPR), "sxbr", r1, r2);
+
+ return emit_RRE(p, 0xb34b0000, r1, r2);
+}
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_emit.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_emit.h
+++ valgrind/VEX/priv/host_s390_emit.h
@@ -0,0 +1,279 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_emit.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_HOST_S390_EMIT_H
+#define __VEX_HOST_S390_EMIT_H
+
+#include "libvex_basictypes.h"
+
+UChar *s390_emit_AR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_AGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_A(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_AY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_AG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_AFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_AGFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_AH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_AHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_AHI(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_AGHI(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_NR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_NGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_N(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_NY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_NG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_NIHF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_NILF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_NILL(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_BASR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_BCR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_BC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_BRC(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_CR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_C(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_CY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_CG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_CFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_CS(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2);
+UChar *s390_emit_CSY(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_CSG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_CLR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CLGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CL(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_CLY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_CLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_CLFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_DR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_D(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_DLR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_DLGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_DL(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_DLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_DSGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_DSG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_XR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_XGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_X(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_XY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_XG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_XIHF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_XILF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_FLOGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_IC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_ICY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_IIHF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_IIHH(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_IIHL(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_IILF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_IILH(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_IILL(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_IPM(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LGFR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_L(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_LY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LGF(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LGFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_LTR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LTGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LT(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LTG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LGBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LB(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LGB(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LCR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LCGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LHR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LGHR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_LHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LGH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LHI(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_LGHI(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_LLGFR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLGF(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LLCR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLGCR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLC(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LLGC(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LLHR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLGHR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LLGH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LLILF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_LLILH(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_LLILL(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_MR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_M(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_MFY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_MH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_MHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_MHI(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_MLR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_MLGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_ML(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_MLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_MSR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_MSGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_MS(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_MSY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_MSG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_MSFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_MSGFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_OR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_OGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_O(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_OY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_OG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_OIHF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_OILF(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_OILL(UChar *p, UChar r1, UShort i2);
+UChar *s390_emit_SLL(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2);
+UChar *s390_emit_SLLG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_SRA(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2);
+UChar *s390_emit_SRAG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_SRL(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2);
+UChar *s390_emit_SRLG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_ST(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_STY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_STG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_STC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_STCY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_STH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_STHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_SR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_SGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_S(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_SY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_SG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_SH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_SHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_SLFI(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_LDR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LE(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_LD(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_LEY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LDY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LFPC(UChar *p, UChar b2, UShort d2);
+UChar *s390_emit_LDGR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LGDR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LZER(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LZDR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_SFPC(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_STE(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_STD(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_STEY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_STDY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_STFPC(UChar *p, UChar b2, UShort d2);
+UChar *s390_emit_AEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_ADBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_AXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CEFBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CDFBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CXFBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CEGBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CDGBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CXGBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_CFEBR(UChar *p, UChar r3, UChar r1, UChar r2);
+UChar *s390_emit_CFDBR(UChar *p, UChar r3, UChar r1, UChar r2);
+UChar *s390_emit_CFXBR(UChar *p, UChar r3, UChar r1, UChar r2);
+UChar *s390_emit_CGEBR(UChar *p, UChar r3, UChar r1, UChar r2);
+UChar *s390_emit_CGDBR(UChar *p, UChar r3, UChar r1, UChar r2);
+UChar *s390_emit_CGXBR(UChar *p, UChar r3, UChar r1, UChar r2);
+UChar *s390_emit_DEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_DDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_DXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LCEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LCDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LCXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LDEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LXDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LXEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LXDB(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_LXEB(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2);
+UChar *s390_emit_LNEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LNDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LNXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LPEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LPDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LPXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LEDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LDXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LEXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_MEEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_MDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_MXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_MAEBR(UChar *p, UChar r1, UChar r3, UChar r2);
+UChar *s390_emit_MADBR(UChar *p, UChar r1, UChar r3, UChar r2);
+UChar *s390_emit_MSEBR(UChar *p, UChar r1, UChar r3, UChar r2);
+UChar *s390_emit_MSDBR(UChar *p, UChar r1, UChar r3, UChar r2);
+UChar *s390_emit_SQEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_SQDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_SQXBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_SEBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_SDBR(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_SXBR(UChar *p, UChar r1, UChar r2);
+
+UChar *s390_emit_AFIw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_NILFw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_CFIw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_CLFIw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_XILFw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_IILFw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_LGFIw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_LTw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LTGw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LBRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LGBRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LHRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LGHRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLCRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLGCRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLCw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LLHRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLGHRw(UChar *p, UChar r1, UChar r2);
+UChar *s390_emit_LLHw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_LLILFw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_MFYw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2);
+UChar *s390_emit_MSFIw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_OILFw(UChar *p, UChar r1, UInt i2);
+UChar *s390_emit_SLFIw(UChar *p, UChar r1, UInt i2);
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_emit.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __VEX_HOST_S390_EMIT_H */
--- valgrind/VEX/priv/host_s390_hreg.c
+++ valgrind/VEX/priv/host_s390_hreg.c
@@ -0,0 +1,158 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_hreg.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_s390x.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_s390_defs.h"
+#include "host_s390_hreg.h"
+
+
+/* Decompile the given register into a static buffer and return it */
+const HChar *
+s390_hreg_as_string(HReg reg)   /* returns pointer to static buffer; overwritten on next call */
+{
+   static HChar buf[32];   /* was buf[10]: "%vR" + 10-digit reg number + NUL needs 14 bytes */
+
+   static const HChar ireg_names[16][5] = {
+      "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+      "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
+   };
+
+   static const HChar freg_names[16][5] = {
+      "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
+      "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15"
+   };
+
+   UInt r; /* hregNumber() returns an UInt */
+
+   r = hregNumber(reg);
+
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      buf[0] = '\0';
+      switch (hregClass(reg)) {
+      case HRcInt64: vex_sprintf(buf, "%%vR%d", r); break;   /* e.g. %vR42 */
+      case HRcFlt64: vex_sprintf(buf, "%%vF%d", r); break;   /* e.g. %vF7 */
+      default: goto fail;
+      }
+      return buf;
+   }
+
+   /* But specific for real regs. */
+   vassert(r < 16);   /* only 16 GPRs / 16 FPRs exist */
+
+   switch (hregClass(reg)) {
+   case HRcInt64: return ireg_names[r];
+   case HRcFlt64: return freg_names[r];
+   default: goto fail;
+   }
+
+   fail: vpanic("s390_hreg_as_string");
+}
+
+
+/* Tell the register allocator which registers can be allocated. */
+void
+s390_hreg_get_allocable(Int *nregs, HReg **arr)
+{
+   UInt i;
+
+   /* Total number of allocable registers (all classes) */
+   *nregs = 16 /* GPRs */
+      - 1 /* r0 */
+      - 1 /* r12 register holding VG_(dispatch_ctr) */
+      - 1 /* r13 guest state pointer */
+      - 1 /* r14 link register */
+      - 1 /* r15 stack pointer */
+      + 16 /* FPRs */
+      ;
+
+   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));   /* VEX-managed storage; no explicit free -- TODO confirm */
+
+   i = 0;
+
+   /* GPR0 is not available because it is interpreted as 0, when used
+      as a base or index register. */
+   (*arr)[i++] = mkHReg(1, HRcInt64, False);   /* False = real (non-virtual) register */
+   (*arr)[i++] = mkHReg(2, HRcInt64, False);
+   (*arr)[i++] = mkHReg(3, HRcInt64, False);
+   (*arr)[i++] = mkHReg(4, HRcInt64, False);
+   (*arr)[i++] = mkHReg(5, HRcInt64, False);
+   (*arr)[i++] = mkHReg(6, HRcInt64, False);
+   (*arr)[i++] = mkHReg(7, HRcInt64, False);
+   (*arr)[i++] = mkHReg(8, HRcInt64, False);
+   (*arr)[i++] = mkHReg(9, HRcInt64, False);
+   /* GPR10 and GPR11 are used for instructions that use register pairs.
+      Otherwise, they are available to the allocator */
+   (*arr)[i++] = mkHReg(10, HRcInt64, False);
+   (*arr)[i++] = mkHReg(11, HRcInt64, False);
+   /* GPR12 is not available because it caches VG_(dispatch_ctr) */
+   /* GPR13 is not available because it is used as guest state pointer */
+   /* GPR14 is not available because it is used as link register */
+   /* GPR15 is not available because it is used as stack pointer */
+
+   /* Add the available real (non-virtual) FPRs */
+   (*arr)[i++] = mkHReg(0, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(1, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(2, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(3, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(4, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(5, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(6, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(7, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(8, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(9, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(10, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(11, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(12, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(13, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(14, HRcFlt64, False);
+   (*arr)[i++] = mkHReg(15, HRcFlt64, False);
+   /* FPR12 - FPR15 are also used as register pairs for 128-bit
+      floating point operations */
+}
+
+
+/* Return the real register that holds the guest state pointer */
+HReg
+s390_hreg_guest_state_pointer(void)
+{
+   return mkHReg(S390_REGNO_GUEST_STATE_POINTER, HRcInt64, False);   /* real GPR dedicated to the guest state */
+}
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_hreg.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_hreg.h
+++ valgrind/VEX/priv/host_s390_hreg.h
@@ -0,0 +1,62 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_hreg.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#ifndef __VEX_HOST_S390_HREG_H
+#define __VEX_HOST_S390_HREG_H
+
+#include "libvex_basictypes.h" /* Bool */
+#include "main_util.h" /* needed for host_generic_regs.h */
+#include "host_generic_regs.h" /* HReg */
+
+const HChar *s390_hreg_as_string(HReg);
+
+void s390_hreg_get_allocable(Int *nregs, HReg **arr);
+
+/* Dedicated registers */
+HReg s390_hreg_guest_state_pointer(void);
+
+
+/* Given the index of a function argument, return the number of the
+ general purpose register in which it is being passed. Arguments are
+ counted 0, 1, 2, ... and they are being passed in r2, r3, r4, ... */
+static __inline__ unsigned
+s390_gprno_from_arg_index(unsigned ix)
+{
+   return ix + 2;   /* arg 0 -> r2, arg 1 -> r3, ... (no upper-bound check here) */
+}
+
+
+#endif /* __VEX_HOST_S390_HREG_H */
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_hreg.h ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_insn.c
+++ valgrind/VEX/priv/host_s390_insn.c
@@ -0,0 +1,3727 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_insn.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+#include "libvex_guest_offsets.h"
+#include "libvex_s390x.h"
+#include "main_util.h"
+#include "host_generic_regs.h"
+
+#include "host_s390_hreg.h"
+#include "host_s390_emit.h"
+#include "host_s390_insn.h"
+
+#include <stdarg.h>
+
+
+/* Register 0 is used as a scratch register. Give it a symbolic name. */
+#define R0 0
+
+/* Split up a 20-bit displacement into its high and low piece
+ suitable for passing as function arguments */
+#define DISP20(d) ((d) & 0xFFF), (((d) >> 12) & 0xFF)
+
+/*---------------------------------------------------------------*/
+/*--- Constructors for the various s390_insn kinds ---*/
+/*---------------------------------------------------------------*/
+
+s390_insn *
+s390_insn_load(UChar size, HReg dst, s390_amode *src)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_LOAD;   /* register <- memory */
+   insn->size = size;
+   insn->variant.load.src = src;
+   insn->variant.load.dst = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_store(UChar size, s390_amode *dst, HReg src)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_STORE;   /* memory <- register */
+   insn->size = size;
+   insn->variant.store.src = src;
+   insn->variant.store.dst = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_move(UChar size, HReg dst, HReg src)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_MOVE;   /* register <- register */
+   insn->size = size;
+   insn->variant.move.src = src;
+   insn->variant.move.dst = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_cond_move(UChar size, s390_cc_t cond, HReg dst, s390_opnd_RMI src)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_COND_MOVE;   /* move performed only if cond holds */
+   insn->size = size;
+   insn->variant.cond_move.cond = cond;
+   insn->variant.cond_move.src = src;
+   insn->variant.cond_move.dst = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_load_immediate(UChar size, HReg dst, ULong value)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_LOAD_IMMEDIATE;
+   insn->size = size;   /* NOTE(review): size not validated here, unlike the constructors above */
+   insn->variant.load_immediate.dst = dst;
+   insn->variant.load_immediate.value = value;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_alu(UChar size, s390_alu_t tag, HReg dst, s390_opnd_RMI op2)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_ALU;
+   insn->size = size;
+   insn->variant.alu.tag = tag;
+   insn->variant.alu.dst = dst;   /* dst doubles as first operand (2-address form) */
+   insn->variant.alu.op2 = op2;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_mul(UChar size, HReg dst_hi, HReg dst_lo, s390_opnd_RMI op2,
+              Bool signed_multiply)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(! hregIsVirtual(dst_hi));   /* destination is a pre-assigned real register pair */
+   vassert(! hregIsVirtual(dst_lo));
+
+   insn->tag = S390_INSN_MUL;
+   insn->size = size;
+   insn->variant.mul.dst_hi = dst_hi;
+   insn->variant.mul.dst_lo = dst_lo;   /* dst_lo also holds the first factor -- presumably; confirm in isel */
+   insn->variant.mul.op2 = op2;
+   insn->variant.mul.signed_multiply = signed_multiply;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_div(UChar size, HReg op1_hi, HReg op1_lo, s390_opnd_RMI op2,
+              Bool signed_divide)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+   vassert(! hregIsVirtual(op1_hi));   /* dividend occupies a pre-assigned real register pair */
+   vassert(! hregIsVirtual(op1_lo));
+
+   insn->tag = S390_INSN_DIV;
+   insn->size = size;
+   insn->variant.div.op1_hi = op1_hi;
+   insn->variant.div.op1_lo = op1_lo;
+   insn->variant.div.op2 = op2;
+   insn->variant.div.signed_divide = signed_divide;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_divs(UChar size, HReg rem, HReg op1, s390_opnd_RMI op2)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(size == 8);   /* 64-bit only */
+   vassert(! hregIsVirtual(op1));
+   vassert(! hregIsVirtual(rem));
+
+   insn->tag = S390_INSN_DIVS;
+   insn->size = size;
+   insn->variant.divs.rem = rem; /* remainder */
+   insn->variant.divs.op1 = op1; /* also quotient */
+   insn->variant.divs.op2 = op2;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_flogr(UChar size, HReg bitpos, HReg modval, s390_opnd_RMI src)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(size == 8);   /* FLOGR is a 64-bit operation */
+   vassert(! hregIsVirtual(bitpos));
+   vassert(! hregIsVirtual(modval));
+
+   insn->tag = S390_INSN_FLOGR;
+   insn->size = size;
+   insn->variant.flogr.bitpos = bitpos; /* bit position */
+   insn->variant.flogr.modval = modval; /* modified input value */
+   insn->variant.flogr.src = src;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_unop(UChar size, s390_unop_t tag, HReg dst, s390_opnd_RMI opnd)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_UNOP;   /* sign/zero-extend or negate; see s390_unop_t */
+   insn->size = size;
+   insn->variant.unop.tag = tag;
+   insn->variant.unop.dst = dst;
+   insn->variant.unop.src = opnd;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_test(UChar size, s390_opnd_RMI src)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag = S390_INSN_TEST;   /* sets the condition code from src */
+   insn->size = size;
+   insn->variant.test.src = src;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_cc2bool(HReg dst, s390_cc_t cond)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_CC2BOOL;   /* dst := (cond holds) ? 1 : 0 -- presumably; confirm in emitter */
+   insn->size = 0;   /* does not matter */
+   insn->variant.cc2bool.cond = cond;
+   insn->variant.cc2bool.dst = dst;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_cas(UChar size, HReg op1, s390_amode *op2, HReg op3, HReg old_mem)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+   vassert(op2->x == 0);   /* amode must have no index register -- presumably CS/CSG addressing */
+
+   insn->tag = S390_INSN_CAS;   /* compare-and-swap */
+   insn->size = size;
+   insn->variant.cas.op1 = op1;
+   insn->variant.cas.op2 = op2;
+   insn->variant.cas.op3 = op3;
+   insn->variant.cas.old_mem = old_mem;   /* receives the previous memory contents */
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_compare(UChar size, HReg src1, s390_opnd_RMI src2,
+                  Bool signed_comparison)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag = S390_INSN_COMPARE;
+   insn->size = size;
+   insn->variant.compare.src1 = src1;
+   insn->variant.compare.src2 = src2;
+   insn->variant.compare.signed_comparison = signed_comparison;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_branch(IRJumpKind kind, s390_cc_t cond, s390_opnd_RMI dst)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BRANCH;   /* conditional branch to dst, annotated with IR jump kind */
+   insn->size = 0;   /* does not matter */
+   insn->variant.branch.kind = kind;
+   insn->variant.branch.dst = dst;
+   insn->variant.branch.cond = cond;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_helper_call(s390_cc_t cond, Addr64 target, UInt num_args,
+                      HChar *name)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_HELPER_CALL;   /* conditional call to a helper at 'target' */
+   insn->size = 0;   /* does not matter */
+   insn->variant.helper_call.cond = cond;
+   insn->variant.helper_call.target = target;
+   insn->variant.helper_call.num_args = num_args;
+   insn->variant.helper_call.name = name;   /* pointer stored, string not copied */
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp_triop(UChar size, s390_bfp_triop_t tag, HReg dst, HReg op2,
+                    HReg op3, s390_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP_TRIOP;   /* fused multiply-add/sub; see s390_bfp_triop_t */
+   insn->size = size;
+   insn->variant.bfp_triop.tag = tag;
+   insn->variant.bfp_triop.dst = dst;   /* dst doubles as first operand */
+   insn->variant.bfp_triop.op2 = op2;
+   insn->variant.bfp_triop.op3 = op3;
+   insn->variant.bfp_triop.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp_binop(UChar size, s390_bfp_binop_t tag, HReg dst, HReg op2,
+                    s390_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP_BINOP;
+   insn->size = size;
+   insn->variant.bfp_binop.tag = tag;
+   insn->variant.bfp_binop.dst = dst;   /* dst doubles as first operand */
+   insn->variant.bfp_binop.op2 = op2;
+   insn->variant.bfp_binop.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp_unop(UChar size, s390_bfp_unop_t tag, HReg dst, HReg op,
+                   s390_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP_UNOP;   /* abs/neg/sqrt and conversions; see s390_bfp_unop_t */
+   insn->size = size;
+   insn->variant.bfp_unop.tag = tag;
+   insn->variant.bfp_unop.dst = dst;
+   insn->variant.bfp_unop.op = op;
+   insn->variant.bfp_unop.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp_compare(UChar size, HReg dst, HReg op1, HReg op2)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);   /* 4- or 8-byte operands only */
+
+   insn->tag = S390_INSN_BFP_COMPARE;
+   insn->size = size;
+   insn->variant.bfp_compare.dst = dst;   /* integer result register */
+   insn->variant.bfp_compare.op1 = op1;
+   insn->variant.bfp_compare.op2 = op2;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp128_binop(UChar size, s390_bfp_binop_t tag, HReg dst_hi,
+                       HReg dst_lo, HReg op2_hi, HReg op2_lo,
+                       s390_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP128_BINOP;   /* 128-bit operands live in hi/lo register pairs */
+   insn->size = size;
+   insn->variant.bfp128_binop.tag = tag;
+   insn->variant.bfp128_binop.dst_hi = dst_hi;   /* dst pair doubles as first operand */
+   insn->variant.bfp128_binop.dst_lo = dst_lo;
+   insn->variant.bfp128_binop.op2_hi = op2_hi;
+   insn->variant.bfp128_binop.op2_lo = op2_lo;
+   insn->variant.bfp128_binop.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp128_unop(UChar size, s390_bfp_binop_t tag, HReg dst_hi,   /* NOTE(review): tag is stored into bfp128_unop.tag which holds unop values (cf. convert_to below and the ABS/NABS/NEG/SQRT cases in the printer); s390_bfp_binop_t looks like a typo for s390_bfp_unop_t -- confirm against header */
+                      HReg dst_lo, HReg op_hi, HReg op_lo,
+                      s390_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP128_UNOP;
+   insn->size = size;
+   insn->variant.bfp128_unop.tag = tag;
+   insn->variant.bfp128_unop.dst_hi = dst_hi;
+   insn->variant.bfp128_unop.dst_lo = dst_lo;
+   insn->variant.bfp128_unop.op_hi = op_hi;
+   insn->variant.bfp128_unop.op_lo = op_lo;
+   insn->variant.bfp128_unop.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp128_compare(UChar size, HReg dst, HReg op1_hi, HReg op1_lo,
+                         HReg op2_hi, HReg op2_lo)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP128_COMPARE;   /* operands are hi/lo register pairs */
+   insn->size = size;
+   insn->variant.bfp128_compare.dst = dst;
+   insn->variant.bfp128_compare.op1_hi = op1_hi;
+   insn->variant.bfp128_compare.op1_lo = op1_lo;
+   insn->variant.bfp128_compare.op2_hi = op2_hi;
+   insn->variant.bfp128_compare.op2_lo = op2_lo;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp128_convert_to(UChar size, s390_bfp_unop_t tag, HReg dst_hi,
+                            HReg dst_lo, HReg op)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP128_CONVERT_TO;   /* narrow value -> 128-bit pair; reuses the bfp128_unop variant */
+   insn->size = size;
+   insn->variant.bfp128_unop.tag = tag;
+   insn->variant.bfp128_unop.dst_hi = dst_hi;
+   insn->variant.bfp128_unop.dst_lo = dst_lo;
+   insn->variant.bfp128_unop.op_hi = op;
+   insn->variant.bfp128_unop.op_lo = INVALID_HREG; /* unused */
+   insn->variant.bfp128_unop.rounding_mode = S390_ROUND_CURRENT; /* unused */
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_bfp128_convert_from(UChar size, s390_bfp_unop_t tag, HReg dst,
+                              HReg op_hi, HReg op_lo,
+                              s390_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+
+   insn->tag = S390_INSN_BFP128_CONVERT_FROM;   /* 128-bit pair -> narrow value; reuses the bfp128_unop variant */
+   insn->size = size;
+   insn->variant.bfp128_unop.tag = tag;
+   insn->variant.bfp128_unop.dst_hi = dst;
+   insn->variant.bfp128_unop.dst_lo = INVALID_HREG; /* unused */
+   insn->variant.bfp128_unop.op_hi = op_hi;
+   insn->variant.bfp128_unop.op_lo = op_lo;
+   insn->variant.bfp128_unop.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Debug print ---*/
+/*---------------------------------------------------------------*/
+
+static const HChar *
+s390_cc_as_string(s390_cc_t cc)   /* condition code -> human-readable name, for %C in s390_sprintf */
+{
+   switch (cc) {
+   case S390_CC_NEVER:  return "never";
+   case S390_CC_OVFL:   return "overflow";
+   case S390_CC_H:      return "greater than";     /* A > B ; high */
+   case S390_CC_NLE:    return "not low or equal";
+   case S390_CC_L:      return "less than";        /* A < B ; low */
+   case S390_CC_NHE:    return "not high or equal";
+   case S390_CC_LH:     return "low or high";
+   case S390_CC_NE:     return "not equal";        /* A != B ; not zero */
+   case S390_CC_E:      return "equal";            /* A == B ; zero */
+   case S390_CC_NLH:    return "not low or high";
+   case S390_CC_HE:     return "greater or equal"; /* A >= B ; high or equal*/
+   case S390_CC_NL:     return "not low";          /* not low */
+   case S390_CC_LE:     return "less or equal";    /* A <= B ; low or equal */
+   case S390_CC_NH:     return "not high";
+   case S390_CC_NO:     return "not overflow";
+   case S390_CC_ALWAYS: return "always";
+   default:   /* invalid condition code */
+      vpanic("s390_cc_as_string");
+   }
+}
+
+
+/* Helper function for writing out a V insn */
+static void
+s390_sprintf(HChar *buf, HChar *fmt, ...)   /* mini-printf with insn-specific specifiers: %M %R %A %C %L %O %I */
+{
+   HChar *p;
+   ULong value;
+   va_list args;
+   va_start(args, fmt);
+
+   p = buf;   /* no bounds checking: caller's buffer must be large enough */
+   for ( ; *fmt; ++fmt) {
+      Int c = *fmt;
+
+      if (c != '%') {
+         *p++ = c;
+         continue;
+      }
+
+      c = *++fmt;  /* next char */
+      switch (c) {
+      case '%':
+         *p++ = c;   /* %% */
+         continue;
+
+      case 's':     /* %s */
+         p += vex_sprintf(p, "%s", va_arg(args, HChar *));
+         continue;
+
+      case 'M':     /* %M = mnemonic */
+         p += vex_sprintf(p, "%-8s", va_arg(args, HChar *));
+         continue;
+
+      case 'R':     /* %R = register */
+         p += vex_sprintf(p, "%s", s390_hreg_as_string(va_arg(args, HReg)));
+         continue;
+
+      case 'A':     /* %A = amode */
+         p += vex_sprintf(p, "%s",
+                          s390_amode_as_string(va_arg(args, s390_amode *)));
+         continue;
+
+      case 'C':     /* %C = condition code */
+         p += vex_sprintf(p, "%s", s390_cc_as_string(va_arg(args, s390_cc_t)));
+         continue;
+
+      case 'L': {   /* %L = argument list in helper call */
+         UInt i, num_args;
+
+         num_args = va_arg(args, UInt);
+
+         for (i = 0; i < num_args; ++i) {
+            if (i != 0) p += vex_sprintf(p, ", ");
+            p += vex_sprintf(p, "r%d", s390_gprno_from_arg_index(i));
+         }
+         continue;
+      }
+
+      case 'O': {   /* %O = RMI operand */
+         s390_opnd_RMI *op = va_arg(args, s390_opnd_RMI *);
+
+         switch (op->tag) {
+         case S390_OPND_REG:
+            p += vex_sprintf(p, "%s", s390_hreg_as_string(op->variant.reg));
+            continue;
+
+         case S390_OPND_AMODE:
+            p += vex_sprintf(p, "%s", s390_amode_as_string(op->variant.am));
+            continue;
+
+         case S390_OPND_IMMEDIATE:
+            value = op->variant.imm;
+            goto print_value;   /* share the immediate formatting below */
+
+         default:
+            goto fail;
+         }
+      }
+
+      case 'I':     /* %I = immediate value */
+         value = va_arg(args, ULong);
+         goto print_value;
+
+      print_value:
+         if ((Long)value < 0)
+            p += vex_sprintf(p, "%lld", (Long)value);
+         else if (value < 100)
+            p += vex_sprintf(p, "%llu", value);   /* small values in decimal */
+         else
+            p += vex_sprintf(p, "0x%llx", value); /* large values in hex */
+         continue;
+
+      default:
+         goto fail;
+      }
+   }
+   *p = '\0';
+   va_end(args);
+
+   return;
+
+ fail: vpanic("s390_sprintf");   /* fixed: used to say "s390_printf", which names no function */
+}
+
+
+/* Decompile the given insn into a static buffer and return it */
+const HChar *
+s390_insn_as_string(const s390_insn *insn)   /* returns pointer to static buffer; overwritten on next call */
+{
+   static HChar buf[300];
+   const HChar *op;
+   HChar *p;
+
+   buf[0] = '\0';
+
+   switch (insn->tag) {
+   case S390_INSN_LOAD:
+      s390_sprintf(buf, "%M %R,%A", "v-load", insn->variant.load.dst,
+                   insn->variant.load.src);
+      break;
+
+   case S390_INSN_STORE:
+      s390_sprintf(buf, "%M %R,%A", "v-store", insn->variant.store.src,
+                   insn->variant.store.dst);
+      break;
+
+   case S390_INSN_MOVE:
+      s390_sprintf(buf, "%M %R,%R", "v-move", insn->variant.move.dst,
+                   insn->variant.move.src);
+      break;
+
+   case S390_INSN_COND_MOVE:
+      s390_sprintf(buf, "%M if (%C) %R,%O", "v-move",
+                   insn->variant.cond_move.cond, insn->variant.cond_move.dst,
+                   &insn->variant.cond_move.src);
+      break;
+
+   case S390_INSN_LOAD_IMMEDIATE:
+      s390_sprintf(buf, "%M %R,%I", "v-loadi", insn->variant.load_immediate.dst,
+                   insn->variant.load_immediate.value);
+      break;
+
+   case S390_INSN_ALU:
+      switch (insn->variant.alu.tag) {
+      case S390_ALU_ADD:  op = "v-add";  break;
+      case S390_ALU_SUB:  op = "v-sub";  break;
+      case S390_ALU_MUL:  op = "v-mul";  break;
+      case S390_ALU_AND:  op = "v-and";  break;
+      case S390_ALU_OR:   op = "v-or";   break;
+      case S390_ALU_XOR:  op = "v-xor";  break;
+      case S390_ALU_LSH:  op = "v-lsh";  break;
+      case S390_ALU_RSH:  op = "v-rsh";  break;
+      case S390_ALU_RSHA: op = "v-rsha"; break;
+      default: goto fail;
+      }
+      s390_sprintf(buf, "%M %R,%R,%O", op, insn->variant.alu.dst,
+                   insn->variant.alu.dst /* op1 same as dst */,
+                   &insn->variant.alu.op2);
+      break;
+
+   case S390_INSN_MUL:
+      if (insn->variant.mul.signed_multiply) {
+         op = "v-muls";
+      } else {
+         op = "v-mulu";
+      }
+      s390_sprintf(buf, "%M %R,%O", op, insn->variant.mul.dst_hi,
+                   &insn->variant.mul.op2);
+      break;
+
+   case S390_INSN_DIV:
+      if (insn->variant.div.signed_divide) {
+         op = "v-divs";
+      } else {
+         op = "v-divu";
+      }
+      s390_sprintf(buf, "%M %R,%O", op, insn->variant.div.op1_hi,
+                   &insn->variant.div.op2);
+      break;
+
+   case S390_INSN_DIVS:
+      s390_sprintf(buf, "%M %R,%O", "v-divsi", insn->variant.divs.op1,
+                   &insn->variant.divs.op2);
+      break;
+
+   case S390_INSN_FLOGR:
+      s390_sprintf(buf, "%M %R,%O", "v-flogr", insn->variant.flogr.bitpos,
+                   &insn->variant.flogr.src);
+      break;
+
+   case S390_INSN_UNOP:
+      switch (insn->variant.unop.tag) {
+      case S390_ZERO_EXTEND_8:
+      case S390_ZERO_EXTEND_16:
+      case S390_ZERO_EXTEND_32:
+         op = "v-zerox";
+         break;
+
+      case S390_SIGN_EXTEND_8:
+      case S390_SIGN_EXTEND_16:
+      case S390_SIGN_EXTEND_32:
+         op = "v-signx";
+         break;
+
+      case S390_NEGATE:
+         op = "v-neg";
+         break;
+
+      default:
+         goto fail;
+      }
+      s390_sprintf(buf, "%M %R,%O", op, insn->variant.unop.dst,
+                   &insn->variant.unop.src);
+      break;
+
+   case S390_INSN_TEST:
+      s390_sprintf(buf, "%M %O", "v-test", &insn->variant.test.src);
+      break;
+
+   case S390_INSN_CC2BOOL:
+      s390_sprintf(buf, "%M %R,%C", "v-cc2b", insn->variant.cc2bool.dst,
+                   insn->variant.cc2bool.cond);
+      break;
+
+   case S390_INSN_CAS:
+      s390_sprintf(buf, "%M %R,%A,%R,%R", "v-cas", insn->variant.cas.op1,
+                   insn->variant.cas.op2, insn->variant.cas.op3,
+                   insn->variant.cas.old_mem);
+      break;
+
+   case S390_INSN_COMPARE:
+      if (insn->variant.compare.signed_comparison) {
+         op = "v-cmps";
+      } else {
+         op = "v-cmpu";
+      }
+      s390_sprintf(buf, "%M %R,%O", op, insn->variant.compare.src1,
+                   &insn->variant.compare.src2);
+      break;
+
+   case S390_INSN_BRANCH:
+      switch (insn->variant.branch.kind) {
+      case Ijk_ClientReq:   op = "clientreq"; break;
+      case Ijk_Sys_syscall: op = "syscall";   break;
+      case Ijk_Yield:       op = "yield";     break;
+      case Ijk_EmWarn:      op = "emwarn";    break;
+      case Ijk_EmFail:      op = "emfail";    break;
+      case Ijk_MapFail:     op = "mapfail";   break;
+      case Ijk_NoDecode:    op = "nodecode";  break;
+      case Ijk_TInval:      op = "tinval";    break;
+      case Ijk_NoRedir:     op = "noredir";   break;
+      case Ijk_SigTRAP:     op = "sigtrap";   break;
+      case Ijk_Boring:      op = "goto";      break;
+      case Ijk_Call:        op = "call";      break;
+      case Ijk_Ret:         op = "return";    break;
+      default:
+         goto fail;
+      }
+      s390_sprintf(buf, "if (%C) %s %O", insn->variant.branch.cond, op,
+                   &insn->variant.branch.dst);
+      break;
+
+   case S390_INSN_HELPER_CALL: {
+
+      if (insn->variant.helper_call.cond != S390_CC_ALWAYS) {
+         s390_sprintf(buf, "%M if (%C) %s{%I}(%L)", "v-call",
+                      insn->variant.helper_call.cond,
+                      insn->variant.helper_call.name,
+                      insn->variant.helper_call.target,
+                      insn->variant.helper_call.num_args);
+      } else {
+         s390_sprintf(buf, "%M %s{%I}(%L)", "v-call",
+                      insn->variant.helper_call.name,
+                      insn->variant.helper_call.target,
+                      insn->variant.helper_call.num_args);
+      }
+      break;
+   }
+
+   case S390_INSN_BFP_TRIOP:
+      switch (insn->variant.bfp_triop.tag) {
+      case S390_BFP_MADD:  op = "v-fmadd";  break;
+      case S390_BFP_MSUB:  op = "v-fmsub";  break;
+      default: goto fail;
+      }
+      s390_sprintf(buf, "%M %R,%R,%R,%R", op, insn->variant.bfp_triop.dst,
+                   insn->variant.bfp_triop.dst /* op1 same as dst */,
+                   insn->variant.bfp_triop.op2, insn->variant.bfp_triop.op3);
+      break;
+
+   case S390_INSN_BFP_BINOP:
+      switch (insn->variant.bfp_binop.tag) {
+      case S390_BFP_ADD: op = "v-fadd"; break;
+      case S390_BFP_SUB: op = "v-fsub"; break;
+      case S390_BFP_MUL: op = "v-fmul"; break;
+      case S390_BFP_DIV: op = "v-fdiv"; break;
+      default: goto fail;
+      }
+      s390_sprintf(buf, "%M %R,%R,%R", op, insn->variant.bfp_binop.dst,
+                   insn->variant.bfp_binop.dst /* op1 same as dst */,
+                   insn->variant.bfp_binop.op2);
+      break;
+
+   case S390_INSN_BFP_COMPARE:
+      s390_sprintf(buf, "%M %R,%R,%R", "v-fcmp", insn->variant.bfp_compare.dst,
+                   insn->variant.bfp_compare.op1, insn->variant.bfp_compare.op2);
+      break;
+
+   case S390_INSN_BFP_UNOP:
+      switch (insn->variant.bfp_unop.tag) {
+      case S390_BFP_ABS:         op = "v-fabs";  break;
+      case S390_BFP_NABS:        op = "v-fnabs"; break;
+      case S390_BFP_NEG:         op = "v-fneg";  break;
+      case S390_BFP_SQRT:        op = "v-fsqrt"; break;
+      case S390_BFP_I32_TO_F32:
+      case S390_BFP_I32_TO_F64:
+      case S390_BFP_I32_TO_F128:
+      case S390_BFP_I64_TO_F32:
+      case S390_BFP_I64_TO_F64:
+      case S390_BFP_I64_TO_F128: op = "v-i2f"; break;
+      case S390_BFP_F32_TO_I32:
+      case S390_BFP_F32_TO_I64:
+      case S390_BFP_F64_TO_I32:
+      case S390_BFP_F64_TO_I64:
+      case S390_BFP_F128_TO_I32:
+      case S390_BFP_F128_TO_I64: op = "v-f2i"; break;
+      case S390_BFP_F32_TO_F64:
+      case S390_BFP_F32_TO_F128:
+      case S390_BFP_F64_TO_F32:
+      case S390_BFP_F64_TO_F128:
+      case S390_BFP_F128_TO_F32:
+      case S390_BFP_F128_TO_F64: op = "v-f2f"; break;
+      default: goto fail;
+      }
+      s390_sprintf(buf, "%M %R,%R", op, insn->variant.bfp_unop.dst,
+                   insn->variant.bfp_unop.op);
+      break;
+
+   case S390_INSN_BFP128_BINOP:
+      switch (insn->variant.bfp128_binop.tag) {
+      case S390_BFP_ADD: op = "v-fadd"; break;
+      case S390_BFP_SUB: op = "v-fsub"; break;
+      case S390_BFP_MUL: op = "v-fmul"; break;
+      case S390_BFP_DIV: op = "v-fdiv"; break;
+      default: goto fail;
+      }
+      /* Only write the register that identifies the register pair */
+      s390_sprintf(buf, "%M %R,%R,%R", op, insn->variant.bfp128_binop.dst_hi,
+                   insn->variant.bfp128_binop.dst_hi /* op1 same as dst */,
+                   insn->variant.bfp128_binop.op2_hi);
+      break;
+
+   case S390_INSN_BFP128_COMPARE:
+      /* Only write the register that identifies the register pair */
+      s390_sprintf(buf, "%M %R,%R,%R", "v-fcmp", insn->variant.bfp128_compare.dst,
+                   insn->variant.bfp128_compare.op1_hi,
+                   insn->variant.bfp128_compare.op2_hi);
+      break;
+
+   case S390_INSN_BFP128_UNOP:
+   case S390_INSN_BFP128_CONVERT_TO:
+   case S390_INSN_BFP128_CONVERT_FROM:   /* all three share the bfp128_unop variant */
+      switch (insn->variant.bfp128_unop.tag) {
+      case S390_BFP_ABS:         op = "v-fabs";  break;
+      case S390_BFP_NABS:        op = "v-fnabs"; break;
+      case S390_BFP_NEG:         op = "v-fneg";  break;
+      case S390_BFP_SQRT:        op = "v-fsqrt"; break;
+      case S390_BFP_I32_TO_F128:
+      case S390_BFP_I64_TO_F128: op = "v-i2f"; break;
+      case S390_BFP_F128_TO_I32:
+      case S390_BFP_F128_TO_I64: op = "v-f2i"; break;
+      case S390_BFP_F32_TO_F128:
+      case S390_BFP_F64_TO_F128:
+      case S390_BFP_F128_TO_F32:
+      case S390_BFP_F128_TO_F64: op = "v-f2f"; break;
+      default: goto fail;
+      }
+      /* Only write the register that identifies the register pair */
+      s390_sprintf(buf, "%M %R,%R", op, insn->variant.bfp128_unop.dst_hi,
+                   insn->variant.bfp128_unop.op_hi);
+      break;
+
+   default: goto fail;
+   }
+
+   /* Write out how many bytes are involved in the operation */
+
+   {
+      UInt len, i;
+
+      for (p = buf; *p; ++p)
+         continue;   /* find end of string; p left pointing at the NUL */
+
+      len = p - buf;
+
+      if (len < 32) {
+         for (i = len; i < 32; ++i)
+            p += vex_sprintf(p, " ");   /* pad mnemonic column to width 32 */
+      } else {
+         p += vex_sprintf(p, "\t");
+      }
+   }
+
+   /* Special cases first */
+   switch (insn->tag) {
+   case S390_INSN_UNOP:
+      switch (insn->variant.unop.tag) {
+      case S390_SIGN_EXTEND_8:
+      case S390_ZERO_EXTEND_8:  p += vex_sprintf(p, "1 -> "); goto common;
+      case S390_SIGN_EXTEND_16:
+      case S390_ZERO_EXTEND_16: p += vex_sprintf(p, "2 -> "); goto common;
+      case S390_SIGN_EXTEND_32:
+      case S390_ZERO_EXTEND_32: p += vex_sprintf(p, "4 -> "); goto common;
+      default:
+         goto common;
+      }
+
+   case S390_INSN_BFP_UNOP:
+      switch (insn->variant.bfp_unop.tag) {
+      case S390_BFP_I32_TO_F32:
+      case S390_BFP_I32_TO_F64:
+      case S390_BFP_I32_TO_F128:
+      case S390_BFP_F32_TO_I32:
+      case S390_BFP_F32_TO_I64:
+      case S390_BFP_F32_TO_F64:
+      case S390_BFP_F32_TO_F128: p += vex_sprintf(p, "4 -> "); goto common;
+      case S390_BFP_I64_TO_F32:
+      case S390_BFP_I64_TO_F64:
+      case S390_BFP_I64_TO_F128:
+      case S390_BFP_F64_TO_I32:
+      case S390_BFP_F64_TO_I64:
+      case S390_BFP_F64_TO_F32:
+      case S390_BFP_F64_TO_F128: p += vex_sprintf(p, "8 -> "); goto common;
+      case S390_BFP_F128_TO_I32:
+      case S390_BFP_F128_TO_I64:
+      case S390_BFP_F128_TO_F32:
+      case S390_BFP_F128_TO_F64: p += vex_sprintf(p, "16 -> "); goto common;
+      default:
+         goto common;
+      }
+
+   case S390_INSN_BFP128_UNOP:
+   case S390_INSN_BFP128_CONVERT_TO:
+   case S390_INSN_BFP128_CONVERT_FROM:
+      switch (insn->variant.bfp128_unop.tag) {
+      case S390_BFP_I32_TO_F128:
+      case S390_BFP_F32_TO_F128: p += vex_sprintf(p, "4 -> "); goto common;
+      case S390_BFP_I64_TO_F128:
+      case S390_BFP_F64_TO_F128: p += vex_sprintf(p, "8 -> "); goto common;
+      case S390_BFP_F128_TO_I32:
+      case S390_BFP_F128_TO_I64:
+      case S390_BFP_F128_TO_F32:
+      case S390_BFP_F128_TO_F64: p += vex_sprintf(p, "16 -> "); goto common;
+      default:
+         goto common;
+      }
+
+   default:
+      goto common;
+   }
+
+   /* Common case */
+ common:
+   vex_sprintf(p, "%u bytes", (UInt)insn->size);
+
+   return buf;
+
+ fail: vpanic("s390_insn_as_string");
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Helper functions ---*/
+/*---------------------------------------------------------------*/
+
+/* True iff VAL is representable as a signed 16-bit quantity, i.e.
+   sign-extending its low 16 bits reproduces VAL exactly. */
+static __inline__ Bool
+uint_fits_signed_16bit(UInt val)
+{
+   /* Sign-extend the low 16 bits via casts.  The previous idiom
+      ((v << 16) >> 16) left-shifts a signed value into the sign bit,
+      which is undefined behaviour in C. */
+   Int v = (Int)(Short)(val & 0xFFFFu);
+
+   return val == (UInt)v;
+}
+
+
+/* True iff VAL is representable as a signed 16-bit quantity, i.e.
+   sign-extending its low 16 bits reproduces VAL exactly. */
+static __inline__ Bool
+ulong_fits_signed_16bit(ULong val)
+{
+   /* Sign-extend the low 16 bits via casts instead of the former
+      ((v << 48) >> 48), which left-shifts a signed value into the
+      sign bit -- undefined behaviour in C. */
+   Long v = (Long)(Short)(val & 0xFFFFu);
+
+   return val == (ULong)v;
+}
+
+
+/* True iff VAL is representable as a signed 32-bit quantity, i.e.
+   sign-extending its low 32 bits reproduces VAL exactly. */
+static __inline__ Bool
+ulong_fits_signed_32bit(ULong val)
+{
+   /* Sign-extend the low 32 bits via casts instead of the former
+      ((v << 32) >> 32), which left-shifts a signed value into the
+      sign bit -- undefined behaviour in C. */
+   Long v = (Long)(Int)(val & 0xFFFFFFFFu);
+
+   return val == (ULong)v;
+}
+
+
+/* True iff VAL fits into an unsigned 32-bit quantity, i.e. its
+   upper 32 bits are all zero. */
+static __inline__ Bool
+ulong_fits_unsigned_32bit(ULong val)
+{
+   return (val >> 32) == 0;
+}
+
+
+/* Load a 64-bit immediate VAL into register REG.  Chooses the shortest
+   instruction sequence available on the host: 1 insn for small values,
+   up to 2 with the extended-immediate facility, up to 4 without it. */
+static UChar *
+s390_emit_load_64imm(UChar *p, UChar reg, ULong val)
+{
+   /* Cheapest case: LGHI sign-extends a 16-bit immediate to 64 bits. */
+   if (ulong_fits_signed_16bit(val)) {
+      return s390_emit_LGHI(p, reg, val);
+   }
+
+   if (s390_host_has_eimm) {
+      /* Extended-immediate facility: 32-bit immediates in one insn. */
+      if (ulong_fits_unsigned_32bit(val)) {
+         return s390_emit_LLILF(p, reg, val);
+      }
+      if (ulong_fits_signed_32bit(val)) {
+         /* LGFI's sign extension will recreate the correct 64-bit value */
+         return s390_emit_LGFI(p, reg, val);
+      }
+      /* Do it in two steps: upper half [0:31] and lower half [32:63] */
+      p = s390_emit_IIHF(p, reg, val >> 32);
+      return s390_emit_IILF(p, reg, val & 0xFFFFFFFF);
+   }
+
+   /* Fall back: no extended-immediate facility; compose the value from
+      16-bit chunks (IBM bit numbering, [0:15] is the MSB halfword). */
+   if (ulong_fits_unsigned_32bit(val)) {
+      p = s390_emit_LLILH(p, reg, (val >> 16) & 0xFFFF); /* val[32:47]   val[0:31] = 0 */
+      p = s390_emit_IILL(p, reg, val & 0xFFFF);          /* val[48:63] */
+      return p;
+   }
+
+   /* Worst case: all four halfwords inserted individually. */
+   p = s390_emit_IIHH(p, reg, (val >> 48) & 0xFFFF);
+   p = s390_emit_IIHL(p, reg, (val >> 32) & 0xFFFF);
+   p = s390_emit_IILH(p, reg, (val >> 16) & 0xFFFF);
+   p = s390_emit_IILL(p, reg, val & 0xFFFF);
+
+   return p;
+}
+
+
+/* Load a 32-bit immediate VAL into register REG. */
+static UChar *
+s390_emit_load_32imm(UChar *p, UChar reg, UInt val)
+{
+   if (! uint_fits_signed_16bit(val))
+      return s390_emit_IILFw(p, reg, val);
+
+   /* LHI sign-extends its 16-bit operand; for values that fit into a
+      signed 16-bit quantity that recreates the 32-bit value exactly. */
+   return s390_emit_LHI(p, reg, val);
+}
+
+
+/* Load NUM bytes from memory into register REG using addressing mode AM.
+   NUM must be 1, 2, 4, or 8; anything else panics.  The opcode form is
+   chosen by the amode's displacement width (12-bit vs 20-bit). */
+static UChar *
+s390_emit_load_mem(UChar *p, UInt num, UChar reg, const s390_amode *am)
+{
+   UInt b = hregNumber(am->b);
+   UInt x = hregNumber(am->x);  /* 0 for B12 and B20 */
+   UInt d = am->d;
+
+   switch (am->tag) {
+   case S390_AMODE_B12:
+   case S390_AMODE_BX12:
+      switch (num) {
+      case 1: return s390_emit_IC(p, reg, x, b, d);
+      case 2: return s390_emit_LH(p, reg, x, b, d);
+      case 4: return s390_emit_L(p, reg, x, b, d);
+      /* LG is emitted with a 20-bit displacement in both amode cases. */
+      case 8: return s390_emit_LG(p, reg, x, b, DISP20(d));
+      default: goto fail;
+      }
+      break;
+
+   case S390_AMODE_B20:
+   case S390_AMODE_BX20:
+      switch (num) {
+      case 1: return s390_emit_ICY(p, reg, x, b, DISP20(d));
+      case 2: return s390_emit_LHY(p, reg, x, b, DISP20(d));
+      case 4: return s390_emit_LY(p, reg, x, b, DISP20(d));
+      case 8: return s390_emit_LG(p, reg, x, b, DISP20(d));
+      default: goto fail;
+      }
+      break;
+
+   default: goto fail;
+   }
+
+ fail:
+   vpanic("s390_emit_load_mem");
+}
+
+
+/* Load condition code into register REG.  Afterwards REG holds the cc
+   as an integer in the range [0,3].  Clobbers only REG; the cc itself
+   is not modified by this sequence. */
+static UChar *
+s390_emit_load_cc(UChar *p, UChar reg)
+{
+   p = s390_emit_LGHI(p, reg, 0);  /* Clear out, cc not affected */
+   p = s390_emit_IPM(p, reg, reg); /* Insert program mask (incl. cc) */
+   /* Shift 28 bits to the right --> [0,1,2,3] */
+   return s390_emit_SRLG(p, reg, reg, 0, DISP20(28));  /* REG = cc */
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Code generation ---*/
+/*---------------------------------------------------------------*/
+
+/* Emit code for a register load.  Do not load more bytes than requested.
+   FP destinations (HRcFlt64) only support 4- and 8-byte loads; integer
+   destinations are delegated to s390_emit_load_mem. */
+static UChar *
+s390_insn_load_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r, x, b, d;
+   const s390_amode *src;
+
+   src = insn->variant.load.src;
+
+   r = hregNumber(insn->variant.load.dst);
+
+   if (hregClass(insn->variant.load.dst) == HRcFlt64) {
+      b = hregNumber(src->b);
+      x = hregNumber(src->x);  /* 0 for B12 and B20 */
+      d = src->d;
+
+      switch (insn->size) {
+
+      case 4:
+         /* 32-bit FP load; opcode by displacement width. */
+         switch (src->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            return s390_emit_LE(buf, r, x, b, d);
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            return s390_emit_LEY(buf, r, x, b, DISP20(d));
+         }
+         break;
+
+      case 8:
+         /* 64-bit FP load. */
+         switch (src->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            return s390_emit_LD(buf, r, x, b, d);
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            return s390_emit_LDY(buf, r, x, b, DISP20(d));
+         }
+         break;
+      }
+      /* Unsupported FP size or amode tag. */
+      vpanic("s390_insn_load_emit");
+   }
+
+   /* Integer stuff */
+   return s390_emit_load_mem(buf, insn->size, r, src);
+}
+
+
+/* Emit code for a register store.  FP sources (HRcFlt64) support 4- and
+   8-byte stores; integer sources support 1, 2, 4, and 8 bytes.  Opcode
+   form is chosen by the amode's displacement width. */
+static UChar *
+s390_insn_store_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r, x, b, d;
+   const s390_amode *dst;
+
+   dst = insn->variant.store.dst;
+
+   r = hregNumber(insn->variant.store.src);
+   b = hregNumber(dst->b);
+   x = hregNumber(dst->x);  /* 0 for B12 and B20 */
+   d = dst->d;
+
+   if (hregClass(insn->variant.store.src) == HRcFlt64) {
+      switch (insn->size) {
+
+      case 4:
+         switch (dst->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            return s390_emit_STE(buf, r, x, b, d);
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            return s390_emit_STEY(buf, r, x, b, DISP20(d));
+         }
+         break;
+
+      case 8:
+         switch (dst->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            return s390_emit_STD(buf, r, x, b, d);
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            return s390_emit_STDY(buf, r, x, b, DISP20(d));
+         }
+         break;
+      }
+      /* Unsupported FP size or amode tag. */
+      vpanic("s390_insn_store_emit");
+   }
+
+   /* Integer stuff */
+   switch (insn->size) {
+   case 1:
+      switch (dst->tag) {
+      case S390_AMODE_B12:
+      case S390_AMODE_BX12:
+         return s390_emit_STC(buf, r, x, b, d);
+
+      case S390_AMODE_B20:
+      case S390_AMODE_BX20:
+         return s390_emit_STCY(buf, r, x, b, DISP20(d));
+      }
+      break;
+
+   case 2:
+      switch (dst->tag) {
+      case S390_AMODE_B12:
+      case S390_AMODE_BX12:
+         return s390_emit_STH(buf, r, x, b, d);
+
+      case S390_AMODE_B20:
+      case S390_AMODE_BX20:
+         return s390_emit_STHY(buf, r, x, b, DISP20(d));
+      }
+      break;
+
+   case 4:
+      switch (dst->tag) {
+      case S390_AMODE_B12:
+      case S390_AMODE_BX12:
+         return s390_emit_ST(buf, r, x, b, d);
+
+      case S390_AMODE_B20:
+      case S390_AMODE_BX20:
+         return s390_emit_STY(buf, r, x, b, DISP20(d));
+      }
+      break;
+
+   case 8:
+      /* STG is emitted with a 20-bit displacement for all amode tags. */
+      return s390_emit_STG(buf, r, x, b, DISP20(d));
+
+   default:
+      break;
+   }
+
+   vpanic("s390_insn_store_emit");
+}
+
+
+/* Emit a register-to-register move.  Supports GPR->GPR, FPR->FPR, and
+   64-bit transfers between GPRs and FPRs (LDGR/LGDR).  Any other
+   combination indicates an error elsewhere and panics. */
+static UChar *
+s390_insn_move_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt dst = hregNumber(insn->variant.move.dst);
+   UInt src = hregNumber(insn->variant.move.src);
+   HRegClass dst_class = hregClass(insn->variant.move.dst);
+   HRegClass src_class = hregClass(insn->variant.move.src);
+
+   if (dst_class == HRcInt64 && src_class == HRcInt64)
+      return s390_emit_LGR(buf, dst, src);
+
+   if (dst_class == HRcFlt64 && src_class == HRcFlt64)
+      return s390_emit_LDR(buf, dst, src);
+
+   if (dst_class == HRcFlt64 && src_class == HRcInt64)
+      return s390_emit_LDGR(buf, dst, src);
+
+   if (dst_class == HRcInt64 && src_class == HRcFlt64)
+      return s390_emit_LGDR(buf, dst, src);
+
+   /* A move between floating point registers and general purpose
+      registers of different size should never occur and indicates
+      an error elsewhere. */
+   vpanic("s390_insn_move_emit");
+}
+
+
+/* Emit code to load an immediate into a register.  For FP destinations
+   only the value 0 is supported (LZER/LZDR zero the register); integer
+   destinations go through the 32- or 64-bit immediate loaders. */
+static UChar *
+s390_insn_load_immediate_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r;
+   ULong value = insn->variant.load_immediate.value;
+
+   r = hregNumber(insn->variant.load_immediate.dst);
+
+   if (hregClass(insn->variant.load_immediate.dst) == HRcFlt64) {
+      /* Only a zero immediate can be materialised in an FPR here. */
+      vassert(value == 0);
+      switch (insn->size) {
+      case 4: return s390_emit_LZER(buf, r, value);
+      case 8: return s390_emit_LZDR(buf, r, value);
+      }
+      vpanic("s390_insn_load_immediate_emit");
+   }
+
+   switch (insn->size) {
+   case 1:
+   case 2:
+      /* Load the immediate values as a 4 byte value. That does not hurt as
+         those extra bytes will not be looked at. Fall through .... */
+   case 4:
+      return s390_emit_load_32imm(buf, r, value);
+
+   case 8:
+      return s390_emit_load_64imm(buf, r, value);
+   }
+
+   vpanic("s390_insn_load_immediate_emit");
+}
+
+
+/* There is no easy way to do ALU operations on 1-byte or 2-byte operands.
+   So we simply perform a 4-byte operation. Doing so uses possibly undefined
+   bits and produces an undefined result in those extra bit positions. But
+   upstream does not look at those positions, so this is OK.
+
+   The destination register is always both the first source operand and
+   the destination.  The second operand may be a register, a memory
+   amode, or an immediate; R0 serves as the scratch register whenever an
+   operand has to be staged. */
+static UChar *
+s390_insn_alu_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI op2;
+   UInt dst;
+
+   dst = hregNumber(insn->variant.alu.dst);
+   op2 = insn->variant.alu.op2;
+
+   /* Second operand is in a register */
+   if (op2.tag == S390_OPND_REG) {
+      UInt r2 = hregNumber(op2.variant.reg);
+
+      switch (insn->size) {
+      case 1:
+      case 2:
+      case 4:
+         /* 32-bit RR-form opcodes; shifts take the amount from r2 used
+            as the base register with a zero displacement. */
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:  return s390_emit_AR(buf, dst, r2);
+         case S390_ALU_SUB:  return s390_emit_SR(buf, dst, r2);
+         case S390_ALU_MUL:  return s390_emit_MSR(buf, dst, r2);
+         case S390_ALU_AND:  return s390_emit_NR(buf, dst, r2);
+         case S390_ALU_OR:   return s390_emit_OR(buf, dst, r2);
+         case S390_ALU_XOR:  return s390_emit_XR(buf, dst, r2);
+         case S390_ALU_LSH:  return s390_emit_SLL(buf, dst, 0, r2, 0);
+         case S390_ALU_RSH:  return s390_emit_SRL(buf, dst, 0, r2, 0);
+         case S390_ALU_RSHA: return s390_emit_SRA(buf, dst, 0, r2, 0);
+         }
+         goto fail;
+
+      case 8:
+         /* 64-bit variants. */
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:  return s390_emit_AGR(buf, dst, r2);
+         case S390_ALU_SUB:  return s390_emit_SGR(buf, dst, r2);
+         case S390_ALU_MUL:  return s390_emit_MSGR(buf, dst, r2);
+         case S390_ALU_AND:  return s390_emit_NGR(buf, dst, r2);
+         case S390_ALU_OR:   return s390_emit_OGR(buf, dst, r2);
+         case S390_ALU_XOR:  return s390_emit_XGR(buf, dst, r2);
+         case S390_ALU_LSH:  return s390_emit_SLLG(buf, dst, dst, r2, DISP20(0));
+         case S390_ALU_RSH:  return s390_emit_SRLG(buf, dst, dst, r2, DISP20(0));
+         case S390_ALU_RSHA: return s390_emit_SRAG(buf, dst, dst, r2, DISP20(0));
+         }
+         goto fail;
+      }
+      goto fail;
+   }
+
+   /* 2nd operand is in memory */
+   if (op2.tag == S390_OPND_AMODE) {
+      UInt b, x, d;
+      const s390_amode *src = op2.variant.am;
+
+      b = hregNumber(src->b);
+      x = hregNumber(src->x);  /* 0 for B12 and B20 */
+      d = src->d;
+
+      /* Shift operands are special here as there are no opcodes that
+         allow a memory operand. So we first load the 2nd operand to R0.
+         Note: every shift tag returns inside this block, so the size
+         switch below never sees a shift. */
+      if (insn->variant.alu.tag == S390_ALU_LSH ||
+          insn->variant.alu.tag == S390_ALU_RSH ||
+          insn->variant.alu.tag == S390_ALU_RSHA) {
+
+         buf = s390_emit_load_mem(buf, insn->size, R0, src);
+
+         if (insn->size == 8) {
+            if (insn->variant.alu.tag == S390_ALU_LSH)
+               return s390_emit_SLLG(buf, dst, dst, R0, DISP20(0));
+            if (insn->variant.alu.tag == S390_ALU_RSH)
+               return s390_emit_SRLG(buf, dst, dst, R0, DISP20(0));
+            if (insn->variant.alu.tag == S390_ALU_RSHA)
+               return s390_emit_SRAG(buf, dst, dst, R0, DISP20(0));
+         } else {
+            if (insn->variant.alu.tag == S390_ALU_LSH)
+               return s390_emit_SLL(buf, dst, 0, R0, 0);
+            if (insn->variant.alu.tag == S390_ALU_RSH)
+               return s390_emit_SRL(buf, dst, 0, R0, 0);
+            if (insn->variant.alu.tag == S390_ALU_RSHA)
+               return s390_emit_SRA(buf, dst, 0, R0, 0);
+         }
+      }
+
+      switch (insn->size) {
+      case 1:
+         /* Move the byte from memory into scratch register r0 */
+         buf = s390_emit_load_mem(buf, 1, R0, src);
+
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD: return s390_emit_AR(buf, dst, R0);
+         case S390_ALU_SUB: return s390_emit_SR(buf, dst, R0);
+         case S390_ALU_MUL: return s390_emit_MSR(buf, dst, R0);
+         case S390_ALU_AND: return s390_emit_NR(buf, dst, R0);
+         case S390_ALU_OR:  return s390_emit_OR(buf, dst, R0);
+         case S390_ALU_XOR: return s390_emit_XR(buf, dst, R0);
+         case S390_ALU_LSH:
+         case S390_ALU_RSH:
+         case S390_ALU_RSHA: ;  /* avoid GCC warning */
+         }
+         goto fail;
+
+      case 2:
+         switch (src->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD:
+               return s390_emit_AH(buf, dst, x, b, d);
+
+            case S390_ALU_SUB:
+               return s390_emit_SH(buf, dst, x, b, d);
+
+            case S390_ALU_MUL:
+               return s390_emit_MH(buf, dst, x, b, d);
+
+            /* For bitwise operations: Move two bytes from memory into scratch
+               register r0; then perform operation */
+            case S390_ALU_AND:
+               buf = s390_emit_LH(buf, R0, x, b, d);
+               return s390_emit_NR(buf, dst, R0);
+
+            case S390_ALU_OR:
+               buf = s390_emit_LH(buf, R0, x, b, d);
+               return s390_emit_OR(buf, dst, R0);
+
+            case S390_ALU_XOR:
+               buf = s390_emit_LH(buf, R0, x, b, d);
+               return s390_emit_XR(buf, dst, R0);
+
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ;  /* avoid GCC warning */
+            }
+            goto fail;
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD:
+               return s390_emit_AHY(buf, dst, x, b, DISP20(d));
+
+            case S390_ALU_SUB:
+               return s390_emit_SHY(buf, dst, x, b, DISP20(d));
+
+            case S390_ALU_MUL:
+               return s390_emit_MHY(buf, dst, x, b, DISP20(d));
+
+            /* For bitwise operations: Move two bytes from memory into scratch
+               register r0; then perform operation */
+            case S390_ALU_AND:
+               buf = s390_emit_LHY(buf, R0, x, b, DISP20(d));
+               return s390_emit_NR(buf, dst, R0);
+
+            case S390_ALU_OR:
+               buf = s390_emit_LHY(buf, R0, x, b, DISP20(d));
+               return s390_emit_OR(buf, dst, R0);
+
+            case S390_ALU_XOR:
+               buf = s390_emit_LHY(buf, R0, x, b, DISP20(d));
+               return s390_emit_XR(buf, dst, R0);
+
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ;  /* avoid GCC warning */
+            }
+            goto fail;
+         }
+         goto fail;
+
+      case 4:
+         switch (src->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD: return s390_emit_A(buf, dst, x, b, d);
+            case S390_ALU_SUB: return s390_emit_S(buf, dst, x, b, d);
+            case S390_ALU_MUL: return s390_emit_MS(buf, dst, x, b, d);
+            case S390_ALU_AND: return s390_emit_N(buf, dst, x, b, d);
+            case S390_ALU_OR:  return s390_emit_O(buf, dst, x, b, d);
+            case S390_ALU_XOR: return s390_emit_X(buf, dst, x, b, d);
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ;  /* avoid GCC warning */
+            }
+            goto fail;
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD: return s390_emit_AY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_SUB: return s390_emit_SY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_MUL: return s390_emit_MSY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_AND: return s390_emit_NY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_OR:  return s390_emit_OY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_XOR: return s390_emit_XY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ;  /* avoid GCC warning */
+            }
+            goto fail;
+         }
+         goto fail;
+
+      case 8:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD: return s390_emit_AG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_SUB: return s390_emit_SG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_MUL: return s390_emit_MSG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_AND: return s390_emit_NG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_OR:  return s390_emit_OG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_XOR: return s390_emit_XG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_LSH:
+         case S390_ALU_RSH:
+         case S390_ALU_RSHA: ;  /* avoid GCC warning */
+         }
+         goto fail;
+      }
+      goto fail;
+   }
+
+   /* 2nd operand is an immediate value */
+   if (op2.tag == S390_OPND_IMMEDIATE) {
+      ULong value;
+
+      /* No masking of the value is required as it is not sign extended */
+      value = op2.variant.imm;
+
+      switch (insn->size) {
+      case 1:
+      case 2:
+         /* There is no 1-byte opcode. Do the computation in
+            2 bytes. The extra byte will be ignored. */
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:
+            return s390_emit_AHI(buf, dst, value);
+
+         case S390_ALU_SUB:
+            /* fixs390 later: as an optimization could perhaps use SLFI ? */
+            buf = s390_emit_LHI(buf, R0, value);
+            return s390_emit_SR(buf, dst, R0);
+
+         case S390_ALU_MUL:
+            return s390_emit_MHI(buf, dst, value);
+
+         case S390_ALU_AND: return s390_emit_NILL(buf, dst, value);
+         case S390_ALU_OR:  return s390_emit_OILL(buf, dst, value);
+         case S390_ALU_XOR:
+            /* There is no XILL instruction. Load the immediate value into
+               R0 and combine with the destination register. */
+            buf = s390_emit_LHI(buf, R0, value);
+            return s390_emit_XR(buf, dst, R0);
+
+         case S390_ALU_LSH:
+            return s390_emit_SLL(buf, dst, 0, 0, value);
+
+         case S390_ALU_RSH:
+            return s390_emit_SRL(buf, dst, 0, 0, value);
+
+         case S390_ALU_RSHA:
+            return s390_emit_SRA(buf, dst, 0, 0, value);
+         }
+         goto fail;
+
+      case 4:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:
+            if (uint_fits_signed_16bit(value)) {
+               return s390_emit_AHI(buf, dst, value);
+            }
+            return s390_emit_AFIw(buf, dst, value);
+
+         case S390_ALU_SUB:  return s390_emit_SLFIw(buf, dst, value);
+         case S390_ALU_MUL:  return s390_emit_MSFIw(buf, dst, value);
+         case S390_ALU_AND:  return s390_emit_NILFw(buf, dst, value);
+         case S390_ALU_OR:   return s390_emit_OILFw(buf, dst, value);
+         case S390_ALU_XOR:  return s390_emit_XILFw(buf, dst, value);
+         case S390_ALU_LSH:  return s390_emit_SLL(buf, dst, 0, 0, value);
+         case S390_ALU_RSH:  return s390_emit_SRL(buf, dst, 0, 0, value);
+         case S390_ALU_RSHA: return s390_emit_SRA(buf, dst, 0, 0, value);
+         }
+         goto fail;
+
+      case 8:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:
+            if (ulong_fits_signed_16bit(value)) {
+               return s390_emit_AGHI(buf, dst, value);
+            }
+            if (ulong_fits_signed_32bit(value) && s390_host_has_eimm) {
+               return s390_emit_AGFI(buf, dst, value);
+            }
+            /* Load constant into R0 then add */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_AGR(buf, dst, R0);
+
+         case S390_ALU_SUB:
+            /* fixs390 later: as an optimization could perhaps use SLFI ? */
+            /* Load value into R0; then subtract from destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_SGR(buf, dst, R0);
+
+         case S390_ALU_MUL:
+            if (ulong_fits_signed_32bit(value) && s390_host_has_gie) {
+               return s390_emit_MSGFI(buf, dst, value);
+            }
+            /* Load constant into R0 then multiply */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_MSGR(buf, dst, R0);
+
+         /* Do it in two steps: upper half [0:31] and lower half [32:63] */
+         case S390_ALU_AND:
+            if (s390_host_has_eimm) {
+               buf = s390_emit_NIHF(buf, dst, value >> 32);
+               return s390_emit_NILF(buf, dst, value & 0xFFFFFFFF);
+            }
+            /* Load value into R0; then combine with destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_NGR(buf, dst, R0);
+
+         case S390_ALU_OR:
+            if (s390_host_has_eimm) {
+               buf = s390_emit_OIHF(buf, dst, value >> 32);
+               return s390_emit_OILF(buf, dst, value & 0xFFFFFFFF);
+            }
+            /* Load value into R0; then combine with destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_OGR(buf, dst, R0);
+
+         case S390_ALU_XOR:
+            if (s390_host_has_eimm) {
+               buf = s390_emit_XIHF(buf, dst, value >> 32);
+               return s390_emit_XILF(buf, dst, value & 0xFFFFFFFF);
+            }
+            /* Load value into R0; then combine with destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_XGR(buf, dst, R0);
+
+         case S390_ALU_LSH:  return s390_emit_SLLG(buf, dst, dst, 0, DISP20(value));
+         case S390_ALU_RSH:  return s390_emit_SRLG(buf, dst, dst, 0, DISP20(value));
+         case S390_ALU_RSHA: return s390_emit_SRAG(buf, dst, dst, 0, DISP20(value));
+         }
+         goto fail;
+      }
+      goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_alu_emit");
+}
+
+
+/* Emit code for a widening (sign- or zero-extending) unary op.
+   FROM_SIZE is the width of the source operand in bytes; insn->size is
+   the destination width.  SIGN_EXTEND selects sign vs. zero extension.
+   The source may be a register, a memory amode, or an immediate. */
+static UChar *
+s390_widen_emit(UChar *buf, const s390_insn *insn, UInt from_size,
+                Bool sign_extend)
+{
+   s390_opnd_RMI opnd;
+   UInt dst;
+
+   dst = hregNumber(insn->variant.unop.dst);
+   opnd = insn->variant.unop.src;
+
+   switch (opnd.tag) {
+   case S390_OPND_REG: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      UChar r2 = hregNumber(opnd.variant.reg);
+
+      switch (from_size) {
+      case 1:
+         /* Widening to a half-word is implemented like widening to a word
+            because the upper half-word will not be looked at. */
+         if (insn->size == 4 || insn->size == 2) {  /* 8 --> 32    8 --> 16 */
+            if (sign_extend)
+               return s390_emit_LBRw(buf, r1, r2);
+            else
+               return s390_emit_LLCRw(buf, r1, r2);
+         }
+         if (insn->size == 8) {  /* 8 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGBRw(buf, r1, r2);
+            else
+               return s390_emit_LLGCRw(buf, r1, r2);
+         }
+         goto fail;
+
+      case 2:
+         if (insn->size == 4) {  /* 16 --> 32 */
+            if (sign_extend)
+               return s390_emit_LHRw(buf, r1, r2);
+            else
+               return s390_emit_LLHRw(buf, r1, r2);
+         }
+         if (insn->size == 8) {  /* 16 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGHRw(buf, r1, r2);
+            else
+               return s390_emit_LLGHRw(buf, r1, r2);
+         }
+         goto fail;
+
+      case 4:
+         if (insn->size == 8) {  /* 32 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGFR(buf, r1, r2);
+            else
+               return s390_emit_LLGFR(buf, r1, r2);
+         }
+         goto fail;
+
+      default:  /* unexpected "from" size */
+         goto fail;
+      }
+   }
+
+   case S390_OPND_AMODE: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      const s390_amode *src = opnd.variant.am;
+      UChar b = hregNumber(src->b);
+      UChar x = hregNumber(src->x);
+      Int   d = src->d;
+
+      switch (from_size) {
+      case 1:
+         if (insn->size == 4 || insn->size == 2) {
+            if (sign_extend)
+               return s390_emit_LB(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_LLCw(buf, r1, x, b, DISP20(d));
+         }
+         if (insn->size == 8) {
+            if (sign_extend)
+               return s390_emit_LGB(buf, r1, x, b, DISP20(d));
+            else
+               /* No wrapper required. Opcode exists as RXE and RXY */
+               return s390_emit_LLGC(buf, r1, x, b, DISP20(d));
+         }
+         goto fail;
+
+      case 2:
+         if (insn->size == 4) {  /* 16 --> 32 */
+            if (sign_extend == 0)
+               return s390_emit_LLHw(buf, r1, x, b, DISP20(d));
+
+            /* Signed 16 --> 32: pick LH or LHY by displacement width. */
+            switch (src->tag) {
+            case S390_AMODE_B12:
+            case S390_AMODE_BX12:
+               return s390_emit_LH(buf, r1, x, b, d);
+
+            case S390_AMODE_B20:
+            case S390_AMODE_BX20:
+               return s390_emit_LHY(buf, r1, x, b, DISP20(d));
+            }
+            goto fail;
+         }
+         if (insn->size == 8) {  /* 16 --> 64 */
+            /* No wrappers required. Opcodes exist as RXE and RXY */
+            if (sign_extend)
+               return s390_emit_LGH(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_LLGH(buf, r1, x, b, DISP20(d));
+         }
+         goto fail;
+
+      case 4:
+         if (insn->size == 8) {  /* 32 --> 64 */
+            /* No wrappers required. Opcodes exist as RXE and RXY */
+            if (sign_extend)
+               return s390_emit_LGF(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_LLGF(buf, r1, x, b, DISP20(d));
+         }
+         goto fail;
+
+      default:  /* unexpected "from" size */
+         goto fail;
+      }
+   }
+
+   case S390_OPND_IMMEDIATE: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      ULong value = opnd.variant.imm;
+
+      switch (from_size) {
+      case 1:
+         if (insn->size == 4 || insn->size == 2) {  /* 8 --> 32   8 --> 16 */
+            if (sign_extend) {
+               /* host can do the sign extension to 16-bit; LHI does the rest */
+               return s390_emit_LHI(buf, r1, (Short)(Char)(UChar)value);
+            } else {
+               return s390_emit_LHI(buf, r1, value);
+            }
+         }
+         if (insn->size == 8) {  /* 8 --> 64 */
+            if (sign_extend) {
+               /* host can do the sign extension to 16-bit; LGHI does the rest */
+               return s390_emit_LGHI(buf, r1, (Short)(Char)(UChar)value);
+            } else {
+               return s390_emit_LGHI(buf, r1, value);
+            }
+         }
+         goto fail;
+
+      case 2:
+         if (insn->size == 4) {  /* 16 --> 32 */
+            /* NOTE(review): LHI sign-extends its 16-bit operand, so for
+               zero extension this relies on VALUE fitting in 15 bits --
+               confirm the instruction selector guarantees that. */
+            return s390_emit_LHI(buf, r1, value);
+         }
+         if (insn->size == 8) {  /* 16 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGHI(buf, r1, value);
+            else
+               return s390_emit_LLILL(buf, r1, value);
+         }
+         goto fail;
+
+      case 4:
+         if (insn->size == 8) {  /* 32 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGFIw(buf, r1, value);
+            else
+               return s390_emit_LLILFw(buf, r1, value);
+         }
+         goto fail;
+
+      default:  /* unexpected "from" size */
+         goto fail;
+      }
+   }
+   }
+
+ fail:
+   vpanic("s390_widen_emit");
+}
+
+
+/* Emit code for a two's-complement negation.  Register and memory
+   sources use LCR/LCGR (the latter after staging the operand in R0);
+   an immediate source is negated at compile time and loaded directly. */
+static UChar *
+s390_negate_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI opnd;
+
+   opnd = insn->variant.unop.src;
+
+   switch (opnd.tag) {
+   case S390_OPND_REG: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      UChar r2 = hregNumber(opnd.variant.reg);
+
+      switch (insn->size) {
+      case 1:
+      case 2:
+      case 4:
+         return s390_emit_LCR(buf, r1, r2);
+
+      case 8:
+         return s390_emit_LCGR(buf, r1, r2);
+
+      default:
+         goto fail;
+      }
+   }
+
+   case S390_OPND_AMODE: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+
+      /* Load bytes into scratch register R0, then negate */
+      buf = s390_emit_load_mem(buf, insn->size, R0, opnd.variant.am);
+
+      switch (insn->size) {
+      case 1:
+      case 2:
+      case 4:
+         return s390_emit_LCR(buf, r1, R0);
+
+      case 8:
+         return s390_emit_LCGR(buf, r1, R0);
+
+      default:
+         goto fail;
+      }
+   }
+
+   case S390_OPND_IMMEDIATE: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      ULong value = opnd.variant.imm;
+
+      value = ~value + 1;  /* two's complement */
+
+      switch (insn->size) {
+      case 1:
+      case 2:
+         /* Load the immediate values as a 4 byte value. That does not hurt as
+            those extra bytes will not be looked at. Fall through .... */
+      case 4:
+         return s390_emit_load_32imm(buf, r1, value);
+
+      case 8:
+         return s390_emit_load_64imm(buf, r1, value);
+
+      default:
+         goto fail;
+      }
+   }
+   }
+
+ fail:
+   vpanic("s390_negate_emit");
+}
+
+
+/* Emit code for a unary operation by dispatching to the widening or
+   negation emitters. */
+static UChar *
+s390_insn_unop_emit(UChar *buf, const s390_insn *insn)
+{
+   switch (insn->variant.unop.tag) {
+   case S390_NEGATE:         return s390_negate_emit(buf, insn);
+
+   case S390_SIGN_EXTEND_8:  return s390_widen_emit(buf, insn, 1, 1);
+   case S390_SIGN_EXTEND_16: return s390_widen_emit(buf, insn, 2, 1);
+   case S390_SIGN_EXTEND_32: return s390_widen_emit(buf, insn, 4, 1);
+
+   case S390_ZERO_EXTEND_8:  return s390_widen_emit(buf, insn, 1, 0);
+   case S390_ZERO_EXTEND_16: return s390_widen_emit(buf, insn, 2, 0);
+   case S390_ZERO_EXTEND_32: return s390_widen_emit(buf, insn, 4, 0);
+   }
+
+   vpanic("s390_insn_unop_emit");
+}
+
+
+/* Only 4-byte and 8-byte operands are handled. 1-byte and 2-byte
+   comparisons will have been converted to 4-byte comparisons in
+   s390_isel_cc and should not occur here.  The instruction sets the
+   condition code; R0 is used as scratch for memory and immediate
+   operands. */
+static UChar *
+s390_insn_test_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI opnd;
+
+   opnd = insn->variant.test.src;
+
+   switch (opnd.tag) {
+   case S390_OPND_REG: {
+      UInt reg = hregNumber(opnd.variant.reg);
+
+      /* Load-and-test of a register against itself sets the cc. */
+      switch (insn->size) {
+      case 4:
+         return s390_emit_LTR(buf, reg, reg);
+
+      case 8:
+         return s390_emit_LTGR(buf, reg, reg);
+
+      default:
+         goto fail;
+      }
+   }
+
+   case S390_OPND_AMODE: {
+      const s390_amode *am = opnd.variant.am;
+      UChar b = hregNumber(am->b);
+      UChar x = hregNumber(am->x);
+      Int   d = am->d;
+
+      /* Load-and-test from memory into scratch R0. */
+      switch (insn->size) {
+      case 4:
+         return s390_emit_LTw(buf, R0, x, b, DISP20(d));
+
+      case 8:
+         return s390_emit_LTGw(buf, R0, x, b, DISP20(d));
+
+      default:
+         goto fail;
+      }
+   }
+
+   case S390_OPND_IMMEDIATE: {
+      ULong value = opnd.variant.imm;
+
+      /* Materialise the immediate in R0, then load-and-test. */
+      switch (insn->size) {
+      case 4:
+         buf = s390_emit_load_32imm(buf, R0, value);
+         return s390_emit_LTR(buf, R0, R0);
+
+      case 8:
+         buf = s390_emit_load_64imm(buf, R0, value);
+         return s390_emit_LTGR(buf, R0, R0);
+
+      default:
+         goto fail;
+      }
+   }
+
+   default:
+      goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_test_emit");
+}
+
+
+/* Emit code that sets the destination register to 1 if condition COND
+   currently holds and to 0 otherwise.  A 64-bit value is computed. */
+static UChar *
+s390_insn_cc2bool_emit(UChar *buf, const s390_insn *insn)
+{
+   UChar r1 = hregNumber(insn->variant.cc2bool.dst);
+   s390_cc_t cond = insn->variant.cc2bool.cond;
+
+   /* Make the destination register be 1 or 0, depending on whether
+      the relevant condition holds. A 64-bit value is computed. */
+   if (cond == S390_CC_ALWAYS)
+      return s390_emit_LGHI(buf, r1, 1);  /* r1 = 1 */
+
+   /* COND is a 4-bit mask with one bit per cc value.  Shifting the mask
+      left by cc and then right by 3 leaves bit (3 - cc) of the mask in
+      the least significant position; masking with 1 extracts it. */
+   buf = s390_emit_load_cc(buf, r1);                  /* r1 = cc */
+   buf = s390_emit_LGHI(buf, R0, cond);               /* r0 = mask */
+   buf = s390_emit_SLLG(buf, r1, R0, r1, DISP20(0));  /* r1 = mask << cc */
+   buf = s390_emit_SRLG(buf, r1, r1, 0, DISP20(3));   /* r1 = r1 >> 3 */
+   buf = s390_emit_NILL(buf, r1, 1);                  /* r1 = r1 & 0x1 */
+
+   return buf;
+}
+
+
+/* Only 4-byte and 8-byte operands are handled.  Emits a compare-and-swap:
+   op1 is the expected value, op2 the memory location, op3 the new value;
+   the previous memory contents end up in OLD_MEM.  R0 is used so that
+   op1 survives the CS/CSG, which overwrites its first operand. */
+static UChar *
+s390_insn_cas_emit(UChar *buf, const s390_insn *insn)
+{
+   UChar r1, r3, b, old;
+   Int d;
+   s390_amode *am;
+
+   r1 = hregNumber(insn->variant.cas.op1);  /* expected value */
+   r3 = hregNumber(insn->variant.cas.op3);
+   old= hregNumber(insn->variant.cas.old_mem);
+   am = insn->variant.cas.op2;
+   b  = hregNumber(am->b);
+   d  = am->d;
+
+   switch (insn->size) {
+   case 4:
+      /* r1 must not be overwritten. So copy it to R0 and let CS clobber it */
+      buf = s390_emit_LR(buf, R0, r1);
+      if (am->tag == S390_AMODE_B12)
+         buf = s390_emit_CS(buf, R0, r3, b, d);
+      else
+         buf = s390_emit_CSY(buf, R0, r3, b, DISP20(d));
+      /* Now copy R0 which has the old memory value to OLD */
+      return s390_emit_LR(buf, old, R0);
+
+   case 8:
+      /* r1 must not be overwritten. So copy it to R0 and let CSG clobber it */
+      buf = s390_emit_LGR(buf, R0, r1);
+      buf = s390_emit_CSG(buf, R0, r3, b, DISP20(d));
+      /* Now copy R0 which has the old memory value to OLD */
+      return s390_emit_LGR(buf, old, R0);
+
+   default:
+      goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_cas_emit");
+}
+
+
+/* Only 4-byte and 8-byte comparisons are handled. 1-byte and 2-byte
+   comparisons will have been converted to 4-byte comparisons in
+   s390_isel_cc and should not occur here.  Emits a signed or unsigned
+   compare of src1 against src2 (register, memory, or immediate),
+   setting the condition code. */
+static UChar *
+s390_insn_compare_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI op2;
+   HReg op1;
+   Bool signed_comparison;
+
+   op1 = insn->variant.compare.src1;
+   op2 = insn->variant.compare.src2;
+   signed_comparison = insn->variant.compare.signed_comparison;
+
+   switch (op2.tag) {
+   case S390_OPND_REG: {
+      UInt r1 = hregNumber(op1);
+      UInt r2 = hregNumber(op2.variant.reg);
+
+      switch (insn->size) {
+      case 4:
+         if (signed_comparison)
+            return s390_emit_CR(buf, r1, r2);
+         else
+            return s390_emit_CLR(buf, r1, r2);
+
+      case 8:
+         if (signed_comparison)
+            return s390_emit_CGR(buf, r1, r2);
+         else
+            return s390_emit_CLGR(buf, r1, r2);
+
+      default:
+         goto fail;
+      }
+   }
+
+   case S390_OPND_AMODE: {
+      UChar r1 = hregNumber(op1);
+      const s390_amode *am = op2.variant.am;
+      UChar b = hregNumber(am->b);
+      UChar x = hregNumber(am->x);
+      Int   d = am->d;
+
+      switch (insn->size) {
+      case 4:
+         /* Pick 12-bit or 20-bit displacement form by amode tag. */
+         switch (am->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            if (signed_comparison)
+               return s390_emit_C(buf, r1, x, b, d);
+            else
+               return s390_emit_CL(buf, r1, x, b, d);
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            if (signed_comparison)
+               return s390_emit_CY(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_CLY(buf, r1, x, b, DISP20(d));
+         }
+         goto fail;
+
+      case 8:
+         if (signed_comparison)
+            return s390_emit_CG(buf, r1, x, b, DISP20(d));
+         else
+            return s390_emit_CLG(buf, r1, x, b, DISP20(d));
+
+      default:
+         goto fail;
+      }
+   }
+
+   case S390_OPND_IMMEDIATE: {
+      UChar r1 = hregNumber(op1);
+      ULong value = op2.variant.imm;
+
+      switch (insn->size) {
+      case 4:
+         if (signed_comparison)
+            return s390_emit_CFIw(buf, r1, value);
+         else
+            return s390_emit_CLFIw(buf, r1, value);
+
+      case 8:
+         /* No 64-bit compare-immediate used here; stage value in R0. */
+         buf = s390_emit_load_64imm(buf, R0, value);
+         if (signed_comparison)
+            return s390_emit_CGR(buf, r1, R0);
+         else
+            return s390_emit_CLGR(buf, r1, R0);
+
+      default:
+         goto fail;
+      }
+   }
+
+   default:
+      goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_compare_emit");
+}
+
+
+/* Emit code for a widening multiply. The product is written to the
+ even/odd register pair identified by DST_HI (the even register).
+ 1-byte and 2-byte operands are treated as 4-byte operands; there is
+ no signed 64 x 64 -> 128 multiply instruction, hence the vpanics. */
+static UChar *
+s390_insn_mul_emit(UChar *buf, const s390_insn *insn)
+{
+ s390_opnd_RMI op2;
+ UChar r1;
+ Bool signed_multiply;
+
+ /* The register number identifying the register pair */
+ r1 = hregNumber(insn->variant.mul.dst_hi);
+
+ op2 = insn->variant.mul.op2;
+ signed_multiply = insn->variant.mul.signed_multiply;
+
+ switch (op2.tag) {
+ case S390_OPND_REG: {
+ UInt r2 = hregNumber(op2.variant.reg);
+
+ switch (insn->size) {
+ case 1:
+ case 2:
+ case 4:
+ if (signed_multiply)
+ return s390_emit_MR(buf, r1, r2);
+ else
+ return s390_emit_MLR(buf, r1, r2);
+
+ case 8:
+ if (signed_multiply)
+ vpanic("s390_insn_mul_emit");
+ else
+ return s390_emit_MLGR(buf, r1, r2);
+
+ default:
+ goto fail;
+ }
+ }
+
+ case S390_OPND_AMODE: {
+ const s390_amode *am = op2.variant.am;
+ UChar b = hregNumber(am->b);
+ UChar x = hregNumber(am->x);
+ Int d = am->d;
+
+ switch (insn->size) {
+ case 1:
+ case 2:
+ /* Load bytes into scratch register R0, then multiply */
+ buf = s390_emit_load_mem(buf, insn->size, R0, am);
+ if (signed_multiply)
+ return s390_emit_MR(buf, r1, R0);
+ else
+ return s390_emit_MLR(buf, r1, R0);
+
+ case 4:
+ switch (am->tag) {
+ case S390_AMODE_B12:
+ case S390_AMODE_BX12:
+ if (signed_multiply)
+ return s390_emit_M(buf, r1, x, b, d);
+ else
+ return s390_emit_ML(buf, r1, x, b, DISP20(d));
+
+ case S390_AMODE_B20:
+ case S390_AMODE_BX20:
+ if (signed_multiply)
+ return s390_emit_MFYw(buf, r1, x, b, DISP20(d));
+ else
+ /* ML is RXY-format with a 20-bit displacement, so it
+ handles the long amodes too (the B12 case above
+ already relies on this). Previously this panicked. */
+ return s390_emit_ML(buf, r1, x, b, DISP20(d));
+ }
+ goto fail;
+
+ case 8:
+ if (signed_multiply)
+ vpanic("s390_insn_mul_emit");
+ else
+ return s390_emit_MLG(buf, r1, x, b, DISP20(d));
+
+ default:
+ goto fail;
+ }
+ }
+
+ case S390_OPND_IMMEDIATE: {
+ ULong value = op2.variant.imm;
+
+ switch (insn->size) {
+ case 1:
+ case 2:
+ case 4:
+ /* Materialise the immediate in scratch register R0 */
+ buf = s390_emit_load_32imm(buf, R0, value);
+ if (signed_multiply)
+ return s390_emit_MR(buf, r1, R0);
+ else
+ return s390_emit_MLR(buf, r1, R0);
+
+ case 8:
+ buf = s390_emit_load_64imm(buf, R0, value);
+ if (signed_multiply)
+ vpanic("s390_insn_mul_emit");
+ else
+ return s390_emit_MLGR(buf, r1, R0);
+
+ default:
+ goto fail;
+ }
+ }
+
+ default:
+ goto fail;
+ }
+
+ fail:
+ vpanic("s390_insn_mul_emit");
+}
+
+
+/* Emit code for an unsigned or 32-bit signed divide. The dividend
+ lives in the even/odd register pair identified by OP1_HI; the pair
+ receives remainder and quotient. There is no signed 128/64 divide
+ instruction, hence the vpanics for the signed 8-byte cases. */
+static UChar *
+s390_insn_div_emit(UChar *buf, const s390_insn *insn)
+{
+ s390_opnd_RMI op2;
+ UChar r1;
+ Bool signed_divide;
+
+ r1 = hregNumber(insn->variant.div.op1_hi);
+ op2 = insn->variant.div.op2;
+ signed_divide = insn->variant.div.signed_divide;
+
+ switch (op2.tag) {
+ case S390_OPND_REG: {
+ UInt r2 = hregNumber(op2.variant.reg);
+
+ switch (insn->size) {
+ case 4:
+ if (signed_divide)
+ return s390_emit_DR(buf, r1, r2);
+ else
+ return s390_emit_DLR(buf, r1, r2);
+
+ case 8:
+ if (signed_divide)
+ vpanic("s390_insn_div_emit");
+ else
+ return s390_emit_DLGR(buf, r1, r2);
+
+ default:
+ goto fail;
+ }
+ }
+
+ case S390_OPND_AMODE: {
+ const s390_amode *am = op2.variant.am;
+ UChar b = hregNumber(am->b);
+ UChar x = hregNumber(am->x);
+ Int d = am->d;
+
+ switch (insn->size) {
+ case 4:
+ switch (am->tag) {
+ case S390_AMODE_B12:
+ case S390_AMODE_BX12:
+ /* D is RX-format (12-bit disp); DL is RXY-format, so it
+ takes a DISP20 even for a short amode */
+ if (signed_divide)
+ return s390_emit_D(buf, r1, x, b, d);
+ else
+ return s390_emit_DL(buf, r1, x, b, DISP20(d));
+
+ case S390_AMODE_B20:
+ case S390_AMODE_BX20:
+ /* No RXY-format signed 32-bit divide: fetch the divisor
+ into scratch register R0, then divide from there */
+ buf = s390_emit_LY(buf, R0, x, b, DISP20(d));
+ if (signed_divide)
+ return s390_emit_DR(buf, r1, R0);
+ else
+ return s390_emit_DLR(buf, r1, R0);
+ }
+ goto fail;
+
+ case 8:
+ if (signed_divide)
+ vpanic("s390_insn_div_emit");
+ else
+ return s390_emit_DLG(buf, r1, x, b, DISP20(d));
+
+ default:
+ goto fail;
+ }
+ }
+
+ case S390_OPND_IMMEDIATE: {
+ ULong value = op2.variant.imm;
+
+ switch (insn->size) {
+ case 4:
+ /* Materialise the immediate divisor in scratch register R0 */
+ buf = s390_emit_load_32imm(buf, R0, value);
+ if (signed_divide)
+ return s390_emit_DR(buf, r1, R0);
+ else
+ return s390_emit_DLR(buf, r1, R0);
+
+ case 8:
+ buf = s390_emit_load_64imm(buf, R0, value);
+ if (signed_divide)
+ vpanic("s390_insn_div_emit");
+ else
+ return s390_emit_DLGR(buf, r1, R0);
+
+ default:
+ goto fail;
+ }
+ }
+
+ default:
+ goto fail;
+ }
+
+ fail:
+ vpanic("s390_insn_div_emit");
+}
+
+
+/* Emit code for a 64-bit signed divide (DSGR / DSG). REM names the
+ even register of the pair that receives remainder and quotient. */
+static UChar *
+s390_insn_divs_emit(UChar *buf, const s390_insn *insn)
+{
+ UChar pair = hregNumber(insn->variant.divs.rem);
+ s390_opnd_RMI divisor = insn->variant.divs.op2;
+
+ if (divisor.tag == S390_OPND_REG)
+ return s390_emit_DSGR(buf, pair, hregNumber(divisor.variant.reg));
+
+ if (divisor.tag == S390_OPND_AMODE) {
+ const s390_amode *am = divisor.variant.am;
+
+ return s390_emit_DSG(buf, pair, hregNumber(am->x),
+ hregNumber(am->b), DISP20(am->d));
+ }
+
+ if (divisor.tag == S390_OPND_IMMEDIATE) {
+ /* Materialise the immediate divisor in scratch register R0 */
+ buf = s390_emit_load_64imm(buf, R0, divisor.variant.imm);
+ return s390_emit_DSGR(buf, pair, R0);
+ }
+
+ vpanic("s390_insn_divs_emit");
+}
+
+
+/* Emit code for FLOGR (find leftmost one). The result goes into an
+ even/odd register pair: the bit position into the even register
+ (BITPOS), the modified operand into the odd register (MODVAL). */
+static UChar *
+s390_insn_flogr_emit(UChar *buf, const s390_insn *insn)
+{
+ UChar even = hregNumber(insn->variant.flogr.bitpos);
+ UChar odd = hregNumber(insn->variant.flogr.modval);
+ s390_opnd_RMI operand = insn->variant.flogr.src;
+
+ /* FLOGR requires an even/odd pair */
+ vassert((even & 0x1) == 0);
+ vassert(odd == even + 1);
+
+ switch (operand.tag) {
+ case S390_OPND_REG:
+ return s390_emit_FLOGR(buf, even, hregNumber(operand.variant.reg));
+
+ case S390_OPND_AMODE: {
+ const s390_amode *am = operand.variant.am;
+
+ /* Fetch the 64-bit operand into scratch register R0 first */
+ buf = s390_emit_LG(buf, R0, hregNumber(am->x), hregNumber(am->b),
+ DISP20(am->d));
+ return s390_emit_FLOGR(buf, even, R0);
+ }
+
+ case S390_OPND_IMMEDIATE:
+ /* Materialise the immediate in scratch register R0 */
+ buf = s390_emit_load_64imm(buf, R0, operand.variant.imm);
+ return s390_emit_FLOGR(buf, even, R0);
+
+ default:
+ vpanic("s390_insn_flogr_emit");
+ }
+}
+
+
+/* Emit code for a (possibly conditional) branch out of generated code
+ back to the dispatcher: load the guest destination address into the
+ return-value register, optionally set a trace-return code, and
+ branch via the link register. */
+static UChar *
+s390_insn_branch_emit(UChar *buf, const s390_insn *insn)
+{
+ s390_opnd_RMI dst;
+ s390_cc_t cond;
+ IRJumpKind kind;
+ UInt trc;
+ UChar *p, *ptmp = 0; /* avoid compiler warnings */
+
+ kind = insn->variant.branch.kind;
+ cond = insn->variant.branch.cond;
+ dst = insn->variant.branch.dst;
+
+ p = buf;
+ trc = 0;
+
+ if (cond != S390_CC_ALWAYS) {
+ /* So we have something like this
+ if (cond) goto X;
+ Y: ...
+ We convert this into
+ if (! cond) goto Y; // BRC insn; 4 bytes
+ return_reg = X;
+ return to dispatcher
+ Y:
+ */
+ ptmp = p; /* 4 bytes (a BRC insn) to be filled in here */
+ p += 4;
+ }
+
+ /* If a non-boring, set guest-state-pointer appropriately. */
+
+ switch (kind) {
+ case Ijk_ClientReq: trc = VEX_TRC_JMP_CLIENTREQ; break;
+ case Ijk_Sys_syscall: trc = VEX_TRC_JMP_SYS_SYSCALL; break;
+ case Ijk_Yield: trc = VEX_TRC_JMP_YIELD; break;
+ case Ijk_EmWarn: trc = VEX_TRC_JMP_EMWARN; break;
+ case Ijk_EmFail: trc = VEX_TRC_JMP_EMFAIL; break;
+ case Ijk_MapFail: trc = VEX_TRC_JMP_MAPFAIL; break;
+ case Ijk_NoDecode: trc = VEX_TRC_JMP_NODECODE; break;
+ case Ijk_TInval: trc = VEX_TRC_JMP_TINVAL; break;
+ case Ijk_NoRedir: trc = VEX_TRC_JMP_NOREDIR; break;
+ case Ijk_SigTRAP: trc = VEX_TRC_JMP_SIGTRAP; break;
+ case Ijk_Ret: trc = 0; break;
+ case Ijk_Call: trc = 0; break;
+ case Ijk_Boring: trc = 0; break;
+
+ default:
+ vpanic("s390_insn_branch_emit: unknown jump kind");
+ }
+
+ /* Get the destination address into the return register */
+ switch (dst.tag) {
+ case S390_OPND_REG:
+ p = s390_emit_LGR(p, S390_REGNO_RETURN_VALUE, hregNumber(dst.variant.reg));
+ break;
+
+ case S390_OPND_AMODE: {
+ const s390_amode *am = dst.variant.am;
+ UChar b = hregNumber(am->b);
+ UChar x = hregNumber(am->x);
+ Int d = am->d;
+
+ p = s390_emit_LG(p, S390_REGNO_RETURN_VALUE, x, b, DISP20(d));
+ break;
+ }
+
+ case S390_OPND_IMMEDIATE:
+ p = s390_emit_load_64imm(p, S390_REGNO_RETURN_VALUE, dst.variant.imm);
+ break;
+
+ default:
+ goto fail;
+ }
+
+ if (trc != 0) {
+ /* Something special. Set guest-state pointer appropriately */
+ p = s390_emit_LGHI(p, S390_REGNO_GUEST_STATE_POINTER, trc);
+ } else {
+ /* Nothing special needs to be done for calls and returns. */
+ }
+
+ p = s390_emit_BCR(p, S390_CC_ALWAYS, S390_REGNO_LINK_REGISTER);
+
+ if (cond != S390_CC_ALWAYS) {
+ /* Now we know how far the inverted conditional branch has to jump;
+ patch the BRC placeholder reserved above. */
+ Int delta = p - ptmp;
+
+ delta >>= 1; /* immediate constant is #half-words */
+ vassert(delta > 0 && delta < (1 << 16));
+ s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
+ }
+
+ return p;
+
+ fail:
+ vpanic("s390_insn_branch_emit");
+}
+
+
+/* Emit code for a (possibly conditional) call to a helper function:
+ save the link register and the client's FPC around the call, load
+ the helper's address into r1 and call via BASR. */
+static UChar *
+s390_insn_helper_call_emit(UChar *buf, const s390_insn *insn)
+{
+ s390_cc_t cond;
+ ULong target;
+ UChar *ptmp = 0; /* avoid compiler warnings */
+
+ cond = insn->variant.helper_call.cond;
+ target = insn->variant.helper_call.target;
+
+ if (cond != S390_CC_ALWAYS) {
+ /* So we have something like this
+ if (cond) call X;
+ Y: ...
+ We convert this into
+ if (! cond) goto Y; // BRC opcode; 4 bytes
+ call X;
+ Y:
+ */
+ ptmp = buf; /* 4 bytes (a BRC insn) to be filled in here */
+ buf += 4;
+ }
+
+ /* Load the target address into a register, that
+ (a) is not used for passing parameters to the helper and
+ (b) can be clobbered by the callee
+ r1 looks like a good choice.
+ Also, need to arrange for the return address be put into the
+ link-register */
+ buf = s390_emit_load_64imm(buf, 1, target);
+
+ /* Stash away the client's FPC register because the helper might change it. */
+ buf = s390_emit_STFPC(buf, S390_REGNO_STACK_POINTER, S390_OFFSET_SAVED_FPC_C);
+
+ /* Before we can call the helper, we need to save the link register,
+ because the BASR will overwrite it. We cannot use a register for that.
+ (a) Volatile registers will be modified by the helper.
+ (b) For saved registers the client code assumes that they have not
+ changed after the function returns. So we cannot use it to store
+ the link register.
+ In the dispatcher, before calling the client code, we have arranged for
+ a location on the stack for this purpose. See dispatch-s390x-linux.S. */
+ buf = s390_emit_STG(buf, S390_REGNO_LINK_REGISTER, 0, // save LR
+ S390_REGNO_STACK_POINTER, S390_OFFSET_SAVED_LR, 0);
+ buf = s390_emit_BASR(buf, S390_REGNO_LINK_REGISTER, 1); // call helper
+ buf = s390_emit_LG(buf, S390_REGNO_LINK_REGISTER, 0, // restore LR
+ S390_REGNO_STACK_POINTER, S390_OFFSET_SAVED_LR, 0);
+ buf = s390_emit_LFPC(buf, S390_REGNO_STACK_POINTER, // restore FPC
+ S390_OFFSET_SAVED_FPC_C);
+
+ if (cond != S390_CC_ALWAYS) {
+ /* Patch the BRC placeholder reserved above */
+ Int delta = buf - ptmp;
+
+ delta >>= 1; /* immediate constant is #half-words */
+ vassert(delta > 0 && delta < (1 << 16));
+ s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
+ }
+
+ return buf;
+}
+
+
+/* Emit code for a conditional move: if COND holds, copy SRC to DST,
+ otherwise branch over the move. */
+static UChar *
+s390_insn_cond_move_emit(UChar *buf, const s390_insn *insn)
+{
+ HReg dst;
+ s390_opnd_RMI src;
+ s390_cc_t cond;
+ UChar *p, *ptmp = 0; /* avoid compiler warnings */
+
+ cond = insn->variant.cond_move.cond;
+ dst = insn->variant.cond_move.dst;
+ src = insn->variant.cond_move.src;
+
+ p = buf;
+
+ /* Branch (if cond fails) over move instrs */
+ if (cond != S390_CC_ALWAYS) {
+ /* Don't know how many bytes to jump over yet.
+ Make space for a BRC instruction (4 bytes) and fill in later. */
+ ptmp = p; /* to be filled in here */
+ p += 4;
+ }
+
+ // cond true: move src => dst
+
+ switch (src.tag) {
+ case S390_OPND_REG:
+ p = s390_emit_LGR(p, hregNumber(dst), hregNumber(src.variant.reg));
+ break;
+
+ case S390_OPND_AMODE:
+ p = s390_emit_load_mem(p, insn->size, hregNumber(dst), src.variant.am);
+ break;
+
+ case S390_OPND_IMMEDIATE: {
+ ULong value = src.variant.imm;
+ UInt r = hregNumber(dst);
+
+ switch (insn->size) {
+ case 1:
+ case 2:
+ /* Load the immediate values as a 4 byte value. That does not hurt as
+ those extra bytes will not be looked at. Fall through .... */
+ case 4:
+ p = s390_emit_load_32imm(p, r, value);
+ break;
+
+ case 8:
+ p = s390_emit_load_64imm(p, r, value);
+ break;
+
+ default:
+ /* Previously an unexpected size fell through silently, turning
+ the cond-move into a no-op. Fail loudly like the other paths. */
+ goto fail;
+ }
+ break;
+ }
+
+ default:
+ goto fail;
+ }
+
+ if (cond != S390_CC_ALWAYS) {
+ /* Patch the BRC placeholder reserved above */
+ Int delta = p - ptmp;
+
+ delta >>= 1; /* immediate constant is #half-words */
+ vassert(delta > 0 && delta < (1 << 16));
+ s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
+ }
+
+ return p;
+
+ fail:
+ vpanic("s390_insn_cond_move_emit");
+}
+
+
+/* Little helper function to set the rounding mode in the real FPC
+ register. Clobbers scratch register R0. */
+static UChar *
+s390_set_fpc_rounding_mode(UChar *buf, s390_round_t rounding_mode)
+{
+ UChar bits;
+
+ /* Determine BFP rounding bits */
+ switch (rounding_mode) {
+ case S390_ROUND_NEAREST_EVEN: bits = 0; break;
+ case S390_ROUND_ZERO: bits = 1; break;
+ case S390_ROUND_POSINF: bits = 2; break;
+ case S390_ROUND_NEGINF: bits = 3; break;
+ default: vpanic("invalid rounding mode\n");
+ }
+
+ /* Copy FPC from guest state to R0 and OR in the new rounding mode */
+ buf = s390_emit_L(buf, R0, 0, S390_REGNO_GUEST_STATE_POINTER,
+ OFFSET_s390x_fpc); // r0 = guest_fpc
+
+ buf = s390_emit_NILL(buf, R0, 0xFFFC); /* Clear out right-most 2 bits */
+ buf = s390_emit_OILL(buf, R0, bits); /* OR in the new rounding mode */
+ buf = s390_emit_SFPC(buf, R0, 0); /* Load FPC register from R0 */
+
+ return buf;
+}
+
+
+/* Emit code for a fused BFP multiply-and-add/subtract. DST doubles as
+ the addend (read) and the result (written). If a special rounding
+ mode is requested, temporarily install it in the FPC and restore the
+ guest's FPC afterwards. */
+static UChar *
+s390_insn_bfp_triop_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt r1 = hregNumber(insn->variant.bfp_triop.dst);
+ UInt r2 = hregNumber(insn->variant.bfp_triop.op2);
+ UInt r3 = hregNumber(insn->variant.bfp_triop.op3);
+ s390_round_t rounding_mode = insn->variant.bfp_triop.rounding_mode;
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ buf = s390_set_fpc_rounding_mode(buf, rounding_mode);
+ }
+
+ switch (insn->size) {
+ case 4:
+ switch (insn->variant.bfp_triop.tag) {
+ case S390_BFP_MADD: buf = s390_emit_MAEBR(buf, r1, r3, r2); break;
+ case S390_BFP_MSUB: buf = s390_emit_MSEBR(buf, r1, r3, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ case 8:
+ switch (insn->variant.bfp_triop.tag) {
+ case S390_BFP_MADD: buf = s390_emit_MADBR(buf, r1, r3, r2); break;
+ case S390_BFP_MSUB: buf = s390_emit_MSDBR(buf, r1, r3, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ default: goto fail;
+ }
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ /* Restore FPC register from guest state */
+ buf = s390_emit_LFPC(buf, S390_REGNO_GUEST_STATE_POINTER,
+ OFFSET_s390x_fpc); // fpc = guest_fpc
+ }
+ return buf;
+
+ fail:
+ vpanic("s390_insn_bfp_triop_emit");
+}
+
+
+/* Emit code for a BFP add/sub/mul/div. DST doubles as the left operand
+ (read) and the result (written). A non-default rounding mode is
+ installed in the FPC around the operation and the guest's FPC is
+ restored afterwards. */
+static UChar *
+s390_insn_bfp_binop_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt r1 = hregNumber(insn->variant.bfp_binop.dst);
+ UInt r2 = hregNumber(insn->variant.bfp_binop.op2);
+ s390_round_t rounding_mode = insn->variant.bfp_binop.rounding_mode;
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ buf = s390_set_fpc_rounding_mode(buf, rounding_mode);
+ }
+
+ switch (insn->size) {
+ case 4:
+ switch (insn->variant.bfp_binop.tag) {
+ case S390_BFP_ADD: buf = s390_emit_AEBR(buf, r1, r2); break;
+ case S390_BFP_SUB: buf = s390_emit_SEBR(buf, r1, r2); break;
+ case S390_BFP_MUL: buf = s390_emit_MEEBR(buf, r1, r2); break;
+ case S390_BFP_DIV: buf = s390_emit_DEBR(buf, r1, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ case 8:
+ switch (insn->variant.bfp_binop.tag) {
+ case S390_BFP_ADD: buf = s390_emit_ADBR(buf, r1, r2); break;
+ case S390_BFP_SUB: buf = s390_emit_SDBR(buf, r1, r2); break;
+ case S390_BFP_MUL: buf = s390_emit_MDBR(buf, r1, r2); break;
+ case S390_BFP_DIV: buf = s390_emit_DDBR(buf, r1, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ default: goto fail;
+ }
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ /* Restore FPC register from guest state */
+ buf = s390_emit_LFPC(buf, S390_REGNO_GUEST_STATE_POINTER,
+ OFFSET_s390x_fpc);
+ }
+ return buf;
+
+ fail:
+ vpanic("s390_insn_bfp_binop_emit");
+}
+
+
+/* Emit code for a BFP unary operation (abs/nabs/neg/sqrt and the
+ int <-> float / float <-> float conversions). */
+static UChar *
+s390_insn_bfp_unop_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt r1 = hregNumber(insn->variant.bfp_unop.dst);
+ UInt r2 = hregNumber(insn->variant.bfp_unop.op);
+ s390_round_t rounding_mode = insn->variant.bfp_unop.rounding_mode;
+ /* NOTE(review): rounding_mode is passed straight through as the m3
+ field of the convert insns below; assumes the s390_round_t encoding
+ matches the hardware m3 encoding -- confirm. */
+ s390_round_t m3 = rounding_mode;
+
+ /* The "convert to fixed" instructions have a field for the rounding
+ mode and no FPC modification is necessary. So we handle them
+ upfront. */
+ switch (insn->variant.bfp_unop.tag) {
+ case S390_BFP_F32_TO_I32: return s390_emit_CFEBR(buf, m3, r1, r2);
+ case S390_BFP_F64_TO_I32: return s390_emit_CFDBR(buf, m3, r1, r2);
+ case S390_BFP_F32_TO_I64: return s390_emit_CGEBR(buf, m3, r1, r2);
+ case S390_BFP_F64_TO_I64: return s390_emit_CGDBR(buf, m3, r1, r2);
+ default: break;
+ }
+
+ /* For all other insns if a special rounding mode is requested,
+ we need to set the FPC first and restore it later. */
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ buf = s390_set_fpc_rounding_mode(buf, rounding_mode);
+ }
+
+ switch (insn->variant.bfp_unop.tag) {
+ case S390_BFP_ABS:
+ switch (insn->size) {
+ case 4: buf = s390_emit_LPEBR(buf, r1, r2); break;
+ case 8: buf = s390_emit_LPDBR(buf, r1, r2); break;
+ case 16: buf = s390_emit_LPXBR(buf, r1, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ case S390_BFP_NABS:
+ switch (insn->size) {
+ case 4: buf = s390_emit_LNEBR(buf, r1, r2); break;
+ case 8: buf = s390_emit_LNDBR(buf, r1, r2); break;
+ case 16: buf = s390_emit_LNXBR(buf, r1, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ case S390_BFP_NEG:
+ switch (insn->size) {
+ case 4: buf = s390_emit_LCEBR(buf, r1, r2); break;
+ case 8: buf = s390_emit_LCDBR(buf, r1, r2); break;
+ case 16: buf = s390_emit_LCXBR(buf, r1, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ case S390_BFP_SQRT:
+ switch (insn->size) {
+ case 4: buf = s390_emit_SQEBR(buf, r1, r2); break;
+ case 8: buf = s390_emit_SQDBR(buf, r1, r2); break;
+ case 16: buf = s390_emit_SQXBR(buf, r1, r2); break;
+ default: goto fail;
+ }
+ break;
+
+ case S390_BFP_I32_TO_F32: buf = s390_emit_CEFBR(buf, r1, r2); break;
+ case S390_BFP_I32_TO_F64: buf = s390_emit_CDFBR(buf, r1, r2); break;
+ case S390_BFP_I32_TO_F128: buf = s390_emit_CXFBR(buf, r1, r2); break;
+ case S390_BFP_I64_TO_F32: buf = s390_emit_CEGBR(buf, r1, r2); break;
+ case S390_BFP_I64_TO_F64: buf = s390_emit_CDGBR(buf, r1, r2); break;
+ case S390_BFP_I64_TO_F128: buf = s390_emit_CXGBR(buf, r1, r2); break;
+
+ case S390_BFP_F32_TO_F64: buf = s390_emit_LDEBR(buf, r1, r2); break;
+ case S390_BFP_F32_TO_F128: buf = s390_emit_LXEBR(buf, r1, r2); break;
+ case S390_BFP_F64_TO_F32: buf = s390_emit_LEDBR(buf, r1, r2); break;
+ case S390_BFP_F64_TO_F128: buf = s390_emit_LXDBR(buf, r1, r2); break;
+
+ default: goto fail;
+ }
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ /* Restore FPC register from guest state */
+ buf = s390_emit_LFPC(buf, S390_REGNO_GUEST_STATE_POINTER,
+ OFFSET_s390x_fpc); // fpc = guest_fpc
+ }
+ return buf;
+
+ fail:
+ vpanic("s390_insn_bfp_unop_emit");
+}
+
+
+/* Emit code for a BFP compare. The resulting condition code is
+ materialised in the integer register DST. */
+static UChar *
+s390_insn_bfp_compare_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt dst = hregNumber(insn->variant.bfp_compare.dst);
+ UInt left = hregNumber(insn->variant.bfp_compare.op1);
+ UInt right = hregNumber(insn->variant.bfp_compare.op2);
+
+ if (insn->size == 4)
+ buf = s390_emit_CEBR(buf, left, right);
+ else if (insn->size == 8)
+ buf = s390_emit_CDBR(buf, left, right);
+ else
+ vpanic("s390_insn_bfp_compare_emit");
+
+ return s390_emit_load_cc(buf, dst); /* Load condition code into DST */
+}
+
+
+/* Emit code for a 128-bit BFP add/sub/mul/div. Operands live in
+ floating-point register pairs (lo = hi + 2; hi has bit 0x2 clear).
+ DST doubles as the left operand (read) and the result (written). */
+static UChar *
+s390_insn_bfp128_binop_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt r1_hi = hregNumber(insn->variant.bfp128_binop.dst_hi);
+ UInt r1_lo = hregNumber(insn->variant.bfp128_binop.dst_lo);
+ UInt r2_hi = hregNumber(insn->variant.bfp128_binop.op2_hi);
+ UInt r2_lo = hregNumber(insn->variant.bfp128_binop.op2_lo);
+ /* Read the rounding mode through the bfp128_binop variant -- this insn
+ was built as bfp128_binop; reading it through bfp_binop (as before)
+ relied on accidental union-layout overlap. */
+ s390_round_t rounding_mode = insn->variant.bfp128_binop.rounding_mode;
+
+ /* Paranoia */
+ vassert(insn->size == 16);
+ vassert(r1_lo == r1_hi + 2);
+ vassert(r2_lo == r2_hi + 2);
+ vassert((r1_hi & 0x2) == 0);
+ vassert((r2_hi & 0x2) == 0);
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ buf = s390_set_fpc_rounding_mode(buf, rounding_mode);
+ }
+
+ switch (insn->variant.bfp128_binop.tag) {
+ case S390_BFP_ADD: buf = s390_emit_AXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_SUB: buf = s390_emit_SXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_MUL: buf = s390_emit_MXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_DIV: buf = s390_emit_DXBR(buf, r1_hi, r2_hi); break;
+ default: goto fail;
+ }
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ /* Restore FPC register from guest state */
+ buf = s390_emit_LFPC(buf, S390_REGNO_GUEST_STATE_POINTER,
+ OFFSET_s390x_fpc); // fpc = guest_fpc
+ }
+ return buf;
+
+ fail:
+ vpanic("s390_insn_bfp128_binop_emit");
+}
+
+
+/* Emit code for a 128-bit BFP compare. The operands live in
+ floating-point register pairs (lo = hi + 2; hi has bit 0x2 clear);
+ the resulting condition code is materialised in DST. */
+static UChar *
+s390_insn_bfp128_compare_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt dst = hregNumber(insn->variant.bfp128_compare.dst);
+ UInt left_hi = hregNumber(insn->variant.bfp128_compare.op1_hi);
+ UInt left_lo = hregNumber(insn->variant.bfp128_compare.op1_lo);
+ UInt right_hi = hregNumber(insn->variant.bfp128_compare.op2_hi);
+ UInt right_lo = hregNumber(insn->variant.bfp128_compare.op2_lo);
+
+ /* Paranoia: check register-pair constraints */
+ vassert(insn->size == 16);
+ vassert(left_lo == left_hi + 2);
+ vassert(right_lo == right_hi + 2);
+ vassert((left_hi & 0x2) == 0);
+ vassert((right_hi & 0x2) == 0);
+
+ buf = s390_emit_CXBR(buf, left_hi, right_hi);
+
+ /* Load condition code into DST */
+ return s390_emit_load_cc(buf, dst);
+}
+
+
+/* Emit code for a 128-bit BFP unary operation (abs/nabs/neg/sqrt and
+ the narrowing conversions). Operands live in floating-point register
+ pairs (lo = hi + 2; hi has bit 0x2 clear). */
+static UChar *
+s390_insn_bfp128_unop_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt r1_hi = hregNumber(insn->variant.bfp128_unop.dst_hi);
+ UInt r1_lo = hregNumber(insn->variant.bfp128_unop.dst_lo);
+ UInt r2_hi = hregNumber(insn->variant.bfp128_unop.op_hi);
+ UInt r2_lo = hregNumber(insn->variant.bfp128_unop.op_lo);
+ /* Read the rounding mode through the bfp128_unop variant -- this insn
+ was built as bfp128_unop; reading it through bfp_binop (as before)
+ relied on accidental union-layout overlap. */
+ s390_round_t rounding_mode = insn->variant.bfp128_unop.rounding_mode;
+
+ /* Paranoia */
+ vassert(insn->size == 16);
+ vassert(r1_lo == r1_hi + 2);
+ vassert(r2_lo == r2_hi + 2);
+ vassert((r1_hi & 0x2) == 0);
+ vassert((r2_hi & 0x2) == 0);
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ buf = s390_set_fpc_rounding_mode(buf, rounding_mode);
+ }
+
+ switch (insn->variant.bfp128_unop.tag) {
+ case S390_BFP_ABS: buf = s390_emit_LPXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_NABS: buf = s390_emit_LNXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_NEG: buf = s390_emit_LCXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_SQRT: buf = s390_emit_SQXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_F128_TO_F32: buf = s390_emit_LEXBR(buf, r1_hi, r2_hi); break;
+ case S390_BFP_F128_TO_F64: buf = s390_emit_LDXBR(buf, r1_hi, r2_hi); break;
+ default: goto fail;
+ }
+
+ if (rounding_mode != S390_ROUND_CURRENT) {
+ /* Restore FPC register from guest state */
+ buf = s390_emit_LFPC(buf, S390_REGNO_GUEST_STATE_POINTER,
+ OFFSET_s390x_fpc); // fpc = guest_fpc
+ }
+ return buf;
+
+ fail:
+ vpanic("s390_insn_bfp128_unop_emit");
+}
+
+
+/* Conversion to 128-bit BFP does not require a rounding mode (the
+ result is always exact). The destination is a floating-point
+ register pair; the source is a single register R2. */
+static UChar *
+s390_insn_bfp128_convert_to_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt r1_hi = hregNumber(insn->variant.bfp128_unop.dst_hi);
+ UInt r1_lo = hregNumber(insn->variant.bfp128_unop.dst_lo);
+ UInt r2 = hregNumber(insn->variant.bfp128_unop.op_hi);
+
+ /* Paranoia */
+ vassert(insn->size == 16);
+ vassert(r1_lo == r1_hi + 2);
+ vassert((r1_hi & 0x2) == 0);
+
+ switch (insn->variant.bfp128_unop.tag) {
+ case S390_BFP_I32_TO_F128: buf = s390_emit_CXFBR(buf, r1_hi, r2); break;
+ case S390_BFP_I64_TO_F128: buf = s390_emit_CXGBR(buf, r1_hi, r2); break;
+ case S390_BFP_F32_TO_F128: buf = s390_emit_LXEBR(buf, r1_hi, r2); break;
+ case S390_BFP_F64_TO_F128: buf = s390_emit_LXDBR(buf, r1_hi, r2); break;
+ default: goto fail;
+ }
+
+ return buf;
+
+ fail:
+ vpanic("s390_insn_bfp128_convert_to_emit");
+}
+
+
+/* Emit code for a conversion from 128-bit BFP to a fixed-point value.
+ The source is a floating-point register pair; the result (4 or 8
+ bytes) goes into integer register R1. */
+static UChar *
+s390_insn_bfp128_convert_from_emit(UChar *buf, const s390_insn *insn)
+{
+ UInt r1 = hregNumber(insn->variant.bfp128_unop.dst_hi);
+ UInt r2_hi = hregNumber(insn->variant.bfp128_unop.op_hi);
+ UInt r2_lo = hregNumber(insn->variant.bfp128_unop.op_lo);
+ /* Read the rounding mode and tag through the bfp128_unop variant --
+ this insn was built as bfp128_unop; reading them through bfp_binop /
+ bfp_unop (as before) relied on accidental union-layout overlap. */
+ s390_round_t rounding_mode = insn->variant.bfp128_unop.rounding_mode;
+
+ /* Paranoia */
+ vassert(insn->size != 16);
+ vassert(r2_lo == r2_hi + 2);
+ vassert((r2_hi & 0x2) == 0);
+
+ /* The "convert to fixed" instructions have a field for the rounding
+ mode and no FPC modification is necessary. So we handle them
+ upfront. */
+ switch (insn->variant.bfp128_unop.tag) {
+ case S390_BFP_F128_TO_I32: return s390_emit_CFXBR(buf, rounding_mode,
+ r1, r2_hi);
+ case S390_BFP_F128_TO_I64: return s390_emit_CGXBR(buf, rounding_mode,
+ r1, r2_hi);
+ default: break;
+ }
+
+ vpanic("s390_insn_bfp128_convert_from_emit");
+}
+
+
+/* Emit the machine code for INSN into BUF (capacity NBUF bytes) and
+ return the number of bytes written. Pure dispatch on insn->tag.
+ NOTE(review): DISPATCH is currently unused here -- presumably kept
+ for interface symmetry with other host backends; confirm. */
+UInt
+s390_insn_emit(UChar *buf, Int nbuf, const struct s390_insn *insn,
+ void *dispatch)
+{
+ UChar *end;
+
+ switch (insn->tag) {
+ case S390_INSN_LOAD:
+ end = s390_insn_load_emit(buf, insn);
+ break;
+
+ case S390_INSN_STORE:
+ end = s390_insn_store_emit(buf, insn);
+ break;
+
+ case S390_INSN_MOVE:
+ end = s390_insn_move_emit(buf, insn);
+ break;
+
+ case S390_INSN_COND_MOVE:
+ end = s390_insn_cond_move_emit(buf, insn);
+ break;
+
+ case S390_INSN_LOAD_IMMEDIATE:
+ end = s390_insn_load_immediate_emit(buf, insn);
+ break;
+
+ case S390_INSN_ALU:
+ end = s390_insn_alu_emit(buf, insn);
+ break;
+
+ case S390_INSN_MUL:
+ end = s390_insn_mul_emit(buf, insn);
+ break;
+
+ case S390_INSN_DIV:
+ end = s390_insn_div_emit(buf, insn);
+ break;
+
+ case S390_INSN_DIVS:
+ end = s390_insn_divs_emit(buf, insn);
+ break;
+
+ case S390_INSN_FLOGR:
+ end = s390_insn_flogr_emit(buf, insn);
+ break;
+
+ case S390_INSN_UNOP:
+ end = s390_insn_unop_emit(buf, insn);
+ break;
+
+ case S390_INSN_TEST:
+ end = s390_insn_test_emit(buf, insn);
+ break;
+
+ case S390_INSN_CC2BOOL:
+ end = s390_insn_cc2bool_emit(buf, insn);
+ break;
+
+ case S390_INSN_CAS:
+ end = s390_insn_cas_emit(buf, insn);
+ break;
+
+ case S390_INSN_COMPARE:
+ end = s390_insn_compare_emit(buf, insn);
+ break;
+
+ case S390_INSN_BRANCH:
+ end = s390_insn_branch_emit(buf, insn);
+ break;
+
+ case S390_INSN_HELPER_CALL:
+ end = s390_insn_helper_call_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP_TRIOP:
+ end = s390_insn_bfp_triop_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP_BINOP:
+ end = s390_insn_bfp_binop_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP_UNOP:
+ end = s390_insn_bfp_unop_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP_COMPARE:
+ end = s390_insn_bfp_compare_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP128_BINOP:
+ end = s390_insn_bfp128_binop_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP128_COMPARE:
+ end = s390_insn_bfp128_compare_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP128_UNOP:
+ end = s390_insn_bfp128_unop_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP128_CONVERT_TO:
+ end = s390_insn_bfp128_convert_to_emit(buf, insn);
+ break;
+
+ case S390_INSN_BFP128_CONVERT_FROM:
+ end = s390_insn_bfp128_convert_from_emit(buf, insn);
+ break;
+
+ default:
+ vpanic("s390_insn_emit");
+ }
+
+ /* Make sure we did not overrun the provided buffer */
+ vassert(end - buf <= nbuf);
+
+ return end - buf;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Support functions for the register allocator ---*/
+/*---------------------------------------------------------------*/
+
+/* Helper function for s390_insn_get_reg_usage */
+static void
+s390_opnd_RMI_get_reg_usage(HRegUsage *u, s390_opnd_RMI op)
+{
+ /* Record how the RMI operand OP touches registers: a register
+ operand is read, an amode contributes its base/index registers,
+ and an immediate involves no register at all. */
+ if (op.tag == S390_OPND_REG) {
+ addHRegUse(u, HRmRead, op.variant.reg);
+ } else if (op.tag == S390_OPND_AMODE) {
+ s390_amode_get_reg_usage(u, op.variant.am);
+ } else if (op.tag == S390_OPND_IMMEDIATE) {
+ /* nothing to record */
+ } else {
+ vpanic("s390_opnd_RMI_get_reg_usage");
+ }
+}
+
+
+/* Tell the register allocator how the given insn uses the registers */
+void
+s390_insn_get_reg_usage(HRegUsage *u, const s390_insn *insn)
+{
+ initHRegUsage(u);
+
+ switch (insn->tag) {
+ case S390_INSN_LOAD:
+ addHRegUse(u, HRmWrite, insn->variant.load.dst);
+ s390_amode_get_reg_usage(u, insn->variant.load.src);
+ break;
+
+ case S390_INSN_LOAD_IMMEDIATE:
+ addHRegUse(u, HRmWrite, insn->variant.load_immediate.dst);
+ break;
+
+ case S390_INSN_STORE:
+ addHRegUse(u, HRmRead, insn->variant.store.src);
+ s390_amode_get_reg_usage(u, insn->variant.store.dst);
+ break;
+
+ case S390_INSN_MOVE:
+ addHRegUse(u, HRmRead, insn->variant.move.src);
+ addHRegUse(u, HRmWrite, insn->variant.move.dst);
+ break;
+
+ case S390_INSN_COND_MOVE:
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.cond_move.src);
+ addHRegUse(u, HRmWrite, insn->variant.cond_move.dst);
+ break;
+
+ case S390_INSN_ALU:
+ addHRegUse(u, HRmWrite, insn->variant.alu.dst);
+ addHRegUse(u, HRmRead, insn->variant.alu.dst); /* op1 */
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.alu.op2);
+ break;
+
+ case S390_INSN_MUL:
+ addHRegUse(u, HRmRead, insn->variant.mul.dst_lo); /* op1 */
+ addHRegUse(u, HRmWrite, insn->variant.mul.dst_lo);
+ addHRegUse(u, HRmWrite, insn->variant.mul.dst_hi);
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.mul.op2);
+ break;
+
+ case S390_INSN_DIV:
+ addHRegUse(u, HRmRead, insn->variant.div.op1_lo);
+ addHRegUse(u, HRmRead, insn->variant.div.op1_hi);
+ addHRegUse(u, HRmWrite, insn->variant.div.op1_lo);
+ addHRegUse(u, HRmWrite, insn->variant.div.op1_hi);
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.div.op2);
+ break;
+
+ case S390_INSN_DIVS:
+ addHRegUse(u, HRmRead, insn->variant.divs.op1);
+ addHRegUse(u, HRmWrite, insn->variant.divs.op1); /* quotient */
+ addHRegUse(u, HRmWrite, insn->variant.divs.rem); /* remainder */
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.divs.op2);
+ break;
+
+ case S390_INSN_FLOGR:
+ addHRegUse(u, HRmWrite, insn->variant.flogr.bitpos);
+ addHRegUse(u, HRmWrite, insn->variant.flogr.modval);
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.flogr.src);
+ break;
+
+ case S390_INSN_UNOP:
+ addHRegUse(u, HRmWrite, insn->variant.unop.dst);
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.unop.src);
+ break;
+
+ case S390_INSN_TEST:
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.test.src);
+ break;
+
+ case S390_INSN_CC2BOOL:
+ addHRegUse(u, HRmWrite, insn->variant.cc2bool.dst);
+ break;
+
+ case S390_INSN_CAS:
+ addHRegUse(u, HRmRead, insn->variant.cas.op1);
+ s390_amode_get_reg_usage(u, insn->variant.cas.op2);
+ addHRegUse(u, HRmRead, insn->variant.cas.op3);
+ addHRegUse(u, HRmWrite, insn->variant.cas.old_mem);
+ break;
+
+ case S390_INSN_COMPARE:
+ addHRegUse(u, HRmRead, insn->variant.compare.src1);
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.compare.src2);
+ break;
+
+ case S390_INSN_BRANCH:
+ s390_opnd_RMI_get_reg_usage(u, insn->variant.branch.dst);
+ /* The destination address is loaded into S390_REGNO_RETURN_VALUE.
+ See s390_insn_branch_emit. */
+ addHRegUse(u, HRmWrite,
+ mkHReg(S390_REGNO_RETURN_VALUE, HRcInt64, False));
+ break;
+
+ case S390_INSN_HELPER_CALL: {
+ UInt i;
+
+ /* Assume that all volatile registers are clobbered. ABI says,
+ volatile registers are: r0 - r5. Valgrind's register allocator
+ does not know about r0, so we can leave that out */
+ for (i = 1; i <= 5; ++i) {
+ addHRegUse(u, HRmWrite, mkHReg(i, HRcInt64, False));
+ }
+
+ /* Ditto for floating point registers. f0 - f7 are volatile */
+ for (i = 0; i <= 7; ++i) {
+ addHRegUse(u, HRmWrite, mkHReg(i, HRcFlt64, False));
+ }
+
+ /* The registers that are used for passing arguments will be read.
+ Not all of them may, but in general we need to assume that. */
+ for (i = 0; i < insn->variant.helper_call.num_args; ++i) {
+ addHRegUse(u, HRmRead, mkHReg(s390_gprno_from_arg_index(i),
+ HRcInt64, False));
+ }
+
+ /* s390_insn_helper_call_emit also reads / writes the link register
+ and stack pointer. But those registers are not visible to the
+ register allocator. So we don't need to do anything for them. */
+ break;
+ }
+
+ case S390_INSN_BFP_TRIOP:
+ addHRegUse(u, HRmWrite, insn->variant.bfp_triop.dst);
+ addHRegUse(u, HRmRead, insn->variant.bfp_triop.dst); /* first */
+ addHRegUse(u, HRmRead, insn->variant.bfp_triop.op2); /* second */
+ addHRegUse(u, HRmRead, insn->variant.bfp_triop.op3); /* third */
+ break;
+
+ case S390_INSN_BFP_BINOP:
+ addHRegUse(u, HRmWrite, insn->variant.bfp_binop.dst);
+ addHRegUse(u, HRmRead, insn->variant.bfp_binop.dst); /* left */
+ addHRegUse(u, HRmRead, insn->variant.bfp_binop.op2); /* right */
+ break;
+
+ case S390_INSN_BFP_UNOP:
+ addHRegUse(u, HRmWrite, insn->variant.bfp_unop.dst);
+ addHRegUse(u, HRmRead, insn->variant.bfp_unop.op); /* operand */
+ break;
+
+ case S390_INSN_BFP_COMPARE:
+ addHRegUse(u, HRmWrite, insn->variant.bfp_compare.dst);
+ addHRegUse(u, HRmRead, insn->variant.bfp_compare.op1); /* left */
+ addHRegUse(u, HRmRead, insn->variant.bfp_compare.op2); /* right */
+ break;
+
+ case S390_INSN_BFP128_BINOP:
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_binop.dst_hi);
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_binop.dst_lo);
+ addHRegUse(u, HRmRead, insn->variant.bfp128_binop.dst_hi); /* left */
+ addHRegUse(u, HRmRead, insn->variant.bfp128_binop.dst_lo); /* left */
+ addHRegUse(u, HRmRead, insn->variant.bfp128_binop.op2_hi); /* right */
+ addHRegUse(u, HRmRead, insn->variant.bfp128_binop.op2_lo); /* right */
+ break;
+
+ case S390_INSN_BFP128_COMPARE:
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_compare.dst);
+ addHRegUse(u, HRmRead, insn->variant.bfp128_compare.op1_hi); /* left */
+ addHRegUse(u, HRmRead, insn->variant.bfp128_compare.op1_lo); /* left */
+ addHRegUse(u, HRmRead, insn->variant.bfp128_compare.op2_hi); /* right */
+ addHRegUse(u, HRmRead, insn->variant.bfp128_compare.op2_lo); /* right */
+ break;
+
+ case S390_INSN_BFP128_UNOP:
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_unop.dst_hi);
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_unop.dst_lo);
+ addHRegUse(u, HRmRead, insn->variant.bfp128_unop.op_hi);
+ addHRegUse(u, HRmRead, insn->variant.bfp128_unop.op_lo);
+ break;
+
+ case S390_INSN_BFP128_CONVERT_TO:
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_unop.dst_hi);
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_unop.dst_lo);
+ addHRegUse(u, HRmRead, insn->variant.bfp128_unop.op_hi);
+ break;
+
+ case S390_INSN_BFP128_CONVERT_FROM:
+ addHRegUse(u, HRmWrite, insn->variant.bfp128_unop.dst_hi);
+ addHRegUse(u, HRmRead, insn->variant.bfp128_unop.op_hi);
+ addHRegUse(u, HRmRead, insn->variant.bfp128_unop.op_lo);
+ break;
+
+ default:
+ vpanic("s390_insn_get_reg_usage");
+ }
+}
+
+
+/* Helper for s390_insn_map_regs: apply the register remapping M to the
+   register(s) embedded in operand OP, in place. Immediate operands
+   contain no registers and are left untouched. */
+static void
+s390_opnd_RMI_map_regs(HRegRemap *m, s390_opnd_RMI *op)
+{
+   if (op->tag == S390_OPND_REG) {
+      op->variant.reg = lookupHRegRemap(m, op->variant.reg);
+      return;
+   }
+
+   if (op->tag == S390_OPND_AMODE) {
+      s390_amode_map_regs(m, op->variant.am);
+      return;
+   }
+
+   if (op->tag != S390_OPND_IMMEDIATE)
+      vpanic("s390_opnd_RMI_map_regs");
+}
+
+
+/* Apply the register mapping M to every virtual register contained in
+   INSN, in place. This is the counterpart of s390_insn_get_reg_usage:
+   any register reported there must be remapped here. Fixed (non-virtual)
+   registers used implicitly by some insns -- e.g. S390_REGNO_RETURN_VALUE
+   for BRANCH, or the argument registers of HELPER_CALL -- need no
+   remapping. */
+void
+s390_insn_map_regs(HRegRemap *m, s390_insn *insn)
+{
+   switch (insn->tag) {
+   case S390_INSN_LOAD:
+      insn->variant.load.dst = lookupHRegRemap(m, insn->variant.load.dst);
+      s390_amode_map_regs(m, insn->variant.load.src);
+      break;
+
+   case S390_INSN_STORE:
+      s390_amode_map_regs(m, insn->variant.store.dst);
+      insn->variant.store.src = lookupHRegRemap(m, insn->variant.store.src);
+      break;
+
+   case S390_INSN_MOVE:
+      insn->variant.move.dst = lookupHRegRemap(m, insn->variant.move.dst);
+      insn->variant.move.src = lookupHRegRemap(m, insn->variant.move.src);
+      break;
+
+   case S390_INSN_COND_MOVE:
+      insn->variant.cond_move.dst = lookupHRegRemap(m, insn->variant.cond_move.dst);
+      s390_opnd_RMI_map_regs(m, &insn->variant.cond_move.src);
+      break;
+
+   case S390_INSN_LOAD_IMMEDIATE:
+      insn->variant.load_immediate.dst =
+         lookupHRegRemap(m, insn->variant.load_immediate.dst);
+      break;
+
+   case S390_INSN_ALU:
+      insn->variant.alu.dst = lookupHRegRemap(m, insn->variant.alu.dst);
+      s390_opnd_RMI_map_regs(m, &insn->variant.alu.op2);
+      break;
+
+   case S390_INSN_MUL:
+      insn->variant.mul.dst_hi = lookupHRegRemap(m, insn->variant.mul.dst_hi);
+      insn->variant.mul.dst_lo = lookupHRegRemap(m, insn->variant.mul.dst_lo);
+      s390_opnd_RMI_map_regs(m, &insn->variant.mul.op2);
+      break;
+
+   case S390_INSN_DIV:
+      insn->variant.div.op1_hi = lookupHRegRemap(m, insn->variant.div.op1_hi);
+      insn->variant.div.op1_lo = lookupHRegRemap(m, insn->variant.div.op1_lo);
+      s390_opnd_RMI_map_regs(m, &insn->variant.div.op2);
+      break;
+
+   case S390_INSN_DIVS:
+      insn->variant.divs.op1 = lookupHRegRemap(m, insn->variant.divs.op1);
+      insn->variant.divs.rem = lookupHRegRemap(m, insn->variant.divs.rem);
+      s390_opnd_RMI_map_regs(m, &insn->variant.divs.op2);
+      break;
+
+   case S390_INSN_FLOGR:
+      insn->variant.flogr.bitpos = lookupHRegRemap(m, insn->variant.flogr.bitpos);
+      insn->variant.flogr.modval = lookupHRegRemap(m, insn->variant.flogr.modval);
+      s390_opnd_RMI_map_regs(m, &insn->variant.flogr.src);
+      break;
+
+   case S390_INSN_UNOP:
+      insn->variant.unop.dst = lookupHRegRemap(m, insn->variant.unop.dst);
+      s390_opnd_RMI_map_regs(m, &insn->variant.unop.src);
+      break;
+
+   case S390_INSN_TEST:
+      s390_opnd_RMI_map_regs(m, &insn->variant.test.src);
+      break;
+
+   case S390_INSN_CC2BOOL:
+      insn->variant.cc2bool.dst = lookupHRegRemap(m, insn->variant.cc2bool.dst);
+      break;
+
+   case S390_INSN_CAS:
+      insn->variant.cas.op1 = lookupHRegRemap(m, insn->variant.cas.op1);
+      s390_amode_map_regs(m, insn->variant.cas.op2);
+      insn->variant.cas.op3 = lookupHRegRemap(m, insn->variant.cas.op3);
+      insn->variant.cas.old_mem = lookupHRegRemap(m, insn->variant.cas.old_mem);
+      break;
+
+   case S390_INSN_COMPARE:
+      insn->variant.compare.src1 = lookupHRegRemap(m, insn->variant.compare.src1);
+      s390_opnd_RMI_map_regs(m, &insn->variant.compare.src2);
+      break;
+
+   case S390_INSN_BRANCH:
+      s390_opnd_RMI_map_regs(m, &insn->variant.branch.dst);
+      /* No need to map S390_REGNO_RETURN_VALUE. It's not virtual */
+      break;
+
+   case S390_INSN_HELPER_CALL:
+      /* s390_insn_helper_call_emit also reads / writes the link register
+         and stack pointer. But those registers are not visible to the
+         register allocator. So we don't need to do anything for them.
+         As for the arguments of the helper call -- they will be loaded into
+         non-virtual registers. Again, we don't need to do anything for those
+         here. */
+      break;
+
+   case S390_INSN_BFP_TRIOP:
+      insn->variant.bfp_triop.dst = lookupHRegRemap(m, insn->variant.bfp_triop.dst);
+      insn->variant.bfp_triop.op2 = lookupHRegRemap(m, insn->variant.bfp_triop.op2);
+      insn->variant.bfp_triop.op3 = lookupHRegRemap(m, insn->variant.bfp_triop.op3);
+      break;
+
+   case S390_INSN_BFP_BINOP:
+      /* dst doubles as left operand and result (see the bfp_binop variant),
+         so a single remap covers both roles */
+      insn->variant.bfp_binop.dst = lookupHRegRemap(m, insn->variant.bfp_binop.dst);
+      insn->variant.bfp_binop.op2 = lookupHRegRemap(m, insn->variant.bfp_binop.op2);
+      break;
+
+   case S390_INSN_BFP_UNOP:
+      insn->variant.bfp_unop.dst = lookupHRegRemap(m, insn->variant.bfp_unop.dst);
+      insn->variant.bfp_unop.op = lookupHRegRemap(m, insn->variant.bfp_unop.op);
+      break;
+
+   case S390_INSN_BFP_COMPARE:
+      insn->variant.bfp_compare.dst = lookupHRegRemap(m, insn->variant.bfp_compare.dst);
+      insn->variant.bfp_compare.op1 = lookupHRegRemap(m, insn->variant.bfp_compare.op1);
+      insn->variant.bfp_compare.op2 = lookupHRegRemap(m, insn->variant.bfp_compare.op2);
+      break;
+
+   case S390_INSN_BFP128_BINOP:
+      insn->variant.bfp128_binop.dst_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_binop.dst_hi);
+      insn->variant.bfp128_binop.dst_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_binop.dst_lo);
+      insn->variant.bfp128_binop.op2_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_binop.op2_hi);
+      insn->variant.bfp128_binop.op2_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_binop.op2_lo);
+      break;
+
+   case S390_INSN_BFP128_COMPARE:
+      insn->variant.bfp128_compare.dst =
+         lookupHRegRemap(m, insn->variant.bfp128_compare.dst);
+      insn->variant.bfp128_compare.op1_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_compare.op1_hi);
+      insn->variant.bfp128_compare.op1_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_compare.op1_lo);
+      insn->variant.bfp128_compare.op2_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_compare.op2_hi);
+      insn->variant.bfp128_compare.op2_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_compare.op2_lo);
+      break;
+
+   case S390_INSN_BFP128_UNOP:
+      insn->variant.bfp128_unop.dst_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.dst_hi);
+      insn->variant.bfp128_unop.dst_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.dst_lo);
+      insn->variant.bfp128_unop.op_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.op_hi);
+      insn->variant.bfp128_unop.op_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.op_lo);
+      break;
+
+   /* CONVERT_TO reuses the bfp128_unop variant; its operand is a single
+      (non-128-bit) register, so only op_hi is remapped */
+   case S390_INSN_BFP128_CONVERT_TO:
+      insn->variant.bfp128_unop.dst_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.dst_hi);
+      insn->variant.bfp128_unop.dst_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.dst_lo);
+      insn->variant.bfp128_unop.op_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.op_hi);
+      break;
+
+   /* CONVERT_FROM produces a single register result; only dst_hi is used */
+   case S390_INSN_BFP128_CONVERT_FROM:
+      insn->variant.bfp128_unop.dst_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.dst_hi);
+      insn->variant.bfp128_unop.op_hi =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.op_hi);
+      insn->variant.bfp128_unop.op_lo =
+         lookupHRegRemap(m, insn->variant.bfp128_unop.op_lo);
+      break;
+
+   default:
+      vpanic("s390_insn_map_regs");
+   }
+}
+
+
+/* If INSN is a register-to-register move whose source and destination
+   belong to the same register class, store the source in *SRC and the
+   destination in *DST and return True. Otherwise return False and leave
+   *SRC / *DST unmodified. */
+Bool
+s390_insn_is_reg_reg_move(const s390_insn *insn, HReg *src, HReg *dst)
+{
+   HReg from, to;
+
+   if (insn->tag != S390_INSN_MOVE)
+      return False;
+
+   from = insn->variant.move.src;
+   to   = insn->variant.move.dst;
+
+   if (hregClass(from) != hregClass(to))
+      return False;
+
+   *src = from;
+   *dst = to;
+   return True;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_insn.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_insn.h
+++ valgrind/VEX/priv/host_s390_insn.h
@@ -0,0 +1,423 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_insn.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#ifndef __VEX_HOST_S390_INSN_H
+#define __VEX_HOST_S390_INSN_H
+
+#include "libvex_basictypes.h" /* UInt etc */
+#include "libvex.h" /* VexArchInfo */
+
+#include "main_util.h" /* for host_generic_regs.h */
+#include "host_generic_regs.h" /* HReg */
+#include "host_s390_amode.h" /* s390_amode */
+
+/* ------------- 2nd (right) operand of binary operation ---------------- */
+
+typedef enum {
+ S390_OPND_REG,
+ S390_OPND_IMMEDIATE,
+ S390_OPND_AMODE
+} s390_opnd_t;
+
+
+/* Naming convention for operand locations:
+ R - GPR
+ I - immediate value
+ M - memory (any Amode may be used)
+*/
+
+/* An operand that is either in a GPR or is addressable via a BX20 amode */
+typedef struct {
+ s390_opnd_t tag;
+ union {
+ HReg reg;
+ s390_amode *am;
+ ULong imm;
+ } variant;
+} s390_opnd_RMI;
+
+
+/* The kind of instructions */
+typedef enum {
+ S390_INSN_LOAD, /* load register from memory */
+ S390_INSN_STORE, /* store register to memory */
+ S390_INSN_MOVE, /* from register to register */
+   S390_INSN_COND_MOVE, /* conditional "move" to register */
+ S390_INSN_LOAD_IMMEDIATE,
+ S390_INSN_ALU,
+ S390_INSN_MUL, /* n-bit operands; 2n-bit result */
+ S390_INSN_DIV, /* 2n-bit dividend; n-bit divisor; n-bit quot/rem */
+ S390_INSN_DIVS, /* n-bit dividend; n-bit divisor; n-bit quot/rem */
+ S390_INSN_FLOGR,
+ S390_INSN_UNOP,
+ S390_INSN_TEST, /* test operand and set cc */
+ S390_INSN_CC2BOOL,/* convert condition code to 0/1 */
+ S390_INSN_COMPARE,
+ S390_INSN_BRANCH, /* un/conditional goto */
+ S390_INSN_HELPER_CALL,
+ S390_INSN_CAS, /* compare and swap */
+ S390_INSN_BFP_BINOP, /* Binary floating point 32-bit / 64-bit */
+ S390_INSN_BFP_UNOP,
+ S390_INSN_BFP_TRIOP,
+ S390_INSN_BFP_COMPARE,
+ S390_INSN_BFP128_BINOP, /* Binary floating point 128-bit */
+ S390_INSN_BFP128_UNOP,
+ S390_INSN_BFP128_COMPARE,
+ S390_INSN_BFP128_CONVERT_TO,
+ S390_INSN_BFP128_CONVERT_FROM
+} s390_insn_tag;
+
+
+/* The kind of ALU instructions */
+typedef enum {
+ S390_ALU_ADD,
+ S390_ALU_SUB,
+ S390_ALU_MUL, /* n-bit operands; result is lower n-bit of product */
+ S390_ALU_AND,
+ S390_ALU_OR,
+ S390_ALU_XOR,
+ S390_ALU_LSH,
+ S390_ALU_RSH,
+ S390_ALU_RSHA /* arithmetic */
+} s390_alu_t;
+
+
+/* The kind of unary integer operations */
+typedef enum {
+ S390_ZERO_EXTEND_8,
+ S390_ZERO_EXTEND_16,
+ S390_ZERO_EXTEND_32,
+ S390_SIGN_EXTEND_8,
+ S390_SIGN_EXTEND_16,
+ S390_SIGN_EXTEND_32,
+ S390_NEGATE
+} s390_unop_t;
+
+/* The kind of ternary BFP operations */
+typedef enum {
+ S390_BFP_MADD,
+ S390_BFP_MSUB,
+} s390_bfp_triop_t;
+
+/* The kind of binary BFP operations */
+typedef enum {
+ S390_BFP_ADD,
+ S390_BFP_SUB,
+ S390_BFP_MUL,
+ S390_BFP_DIV
+} s390_bfp_binop_t;
+
+
+/* The kind of unary BFP operations */
+typedef enum {
+ S390_BFP_ABS,
+ S390_BFP_NABS,
+ S390_BFP_NEG,
+ S390_BFP_SQRT,
+ S390_BFP_I32_TO_F32,
+ S390_BFP_I32_TO_F64,
+ S390_BFP_I32_TO_F128,
+ S390_BFP_I64_TO_F32,
+ S390_BFP_I64_TO_F64,
+ S390_BFP_I64_TO_F128,
+ S390_BFP_F32_TO_I32,
+ S390_BFP_F32_TO_I64,
+ S390_BFP_F32_TO_F64,
+ S390_BFP_F32_TO_F128,
+ S390_BFP_F64_TO_I32,
+ S390_BFP_F64_TO_I64,
+ S390_BFP_F64_TO_F32,
+ S390_BFP_F64_TO_F128,
+ S390_BFP_F128_TO_I32,
+ S390_BFP_F128_TO_I64,
+ S390_BFP_F128_TO_F32,
+ S390_BFP_F128_TO_F64
+} s390_bfp_unop_t;
+
+
+/* Condition code. The encoding of the enumerators matches the value of
+ the mask field in the various branch opcodes. */
+typedef enum {
+ S390_CC_NEVER= 0,
+ S390_CC_OVFL = 1, /* overflow */
+ S390_CC_H = 2, /* A > B ; high */
+ S390_CC_NLE = 3, /* not low or equal */
+ S390_CC_L = 4, /* A < B ; low */
+ S390_CC_NHE = 5, /* not high or equal */
+ S390_CC_LH = 6, /* low or high */
+ S390_CC_NE = 7, /* A != B ; not zero */
+ S390_CC_E = 8, /* A == B ; zero */
+ S390_CC_NLH = 9, /* not low or high */
+ S390_CC_HE = 10, /* A >= B ; high or equal*/
+ S390_CC_NL = 11, /* not low */
+ S390_CC_LE = 12, /* A <= B ; low or equal */
+ S390_CC_NH = 13, /* not high */
+ S390_CC_NO = 14, /* not overflow */
+ S390_CC_ALWAYS = 15
+} s390_cc_t;
+
+
+/* Rounding mode as it is encoded in the m3/m4 fields of certain
+ instructions (e.g. CFEBR) */
+typedef enum {
+ S390_ROUND_CURRENT = 0,
+ S390_ROUND_NEAREST_AWAY = 1,
+ S390_ROUND_NEAREST_EVEN = 4,
+ S390_ROUND_ZERO = 5,
+ S390_ROUND_POSINF = 6,
+ S390_ROUND_NEGINF = 7
+} s390_round_t;
+
+
+/* Invert the condition code.
+   COND is a 4-bit branch mask (see s390_cc_t). For any cond in [0, 15]
+   the subtraction 15 - cond never borrows, so it equals the bitwise
+   complement of the mask within 4 bits -- i.e. it selects exactly the
+   condition codes that COND excluded. */
+static __inline__ s390_cc_t
+s390_cc_invert(s390_cc_t cond)
+{
+   return S390_CC_ALWAYS - cond;
+}
+
+
+typedef struct s390_insn {
+ s390_insn_tag tag;
+ UChar size; /* size of the result in bytes */
+ union {
+ struct {
+ HReg dst;
+ s390_amode *src;
+ } load;
+ struct {
+ s390_amode *dst;
+ HReg src;
+ } store;
+ struct {
+ HReg dst;
+ HReg src;
+ } move;
+ struct {
+ s390_cc_t cond;
+ HReg dst;
+ s390_opnd_RMI src;
+ } cond_move;
+ struct {
+ HReg dst;
+ ULong value; /* not sign extended */
+ } load_immediate;
+ /* add, and, or, xor */
+ struct {
+ s390_alu_t tag;
+ HReg dst; /* op1 */
+ s390_opnd_RMI op2;
+ } alu;
+ struct {
+ Bool signed_multiply;
+ HReg dst_hi; /* r10 */
+ HReg dst_lo; /* also op1 r11 */
+ s390_opnd_RMI op2;
+ } mul;
+ struct {
+ Bool signed_divide;
+ HReg op1_hi; /* also remainder r10 */
+ HReg op1_lo; /* also quotient r11 */
+ s390_opnd_RMI op2;
+ } div;
+ struct {
+ HReg rem; /* remainder r10 */
+ HReg op1; /* also quotient r11 */
+ s390_opnd_RMI op2;
+ } divs;
+ struct {
+ HReg bitpos; /* position of leftmost '1' bit r10 */
+ HReg modval; /* modified input value r11 */
+ s390_opnd_RMI src;
+ } flogr;
+ struct {
+ s390_unop_t tag;
+ HReg dst;
+ s390_opnd_RMI src;
+ } unop;
+ struct {
+ Bool signed_comparison;
+ HReg src1;
+ s390_opnd_RMI src2;
+ } compare;
+ struct {
+ HReg dst; /* condition code in s390 encoding */
+ HReg op1;
+ HReg op2;
+ } bfp_compare;
+ struct {
+ s390_opnd_RMI src;
+ } test;
+ /* Convert the condition code to a boolean value. */
+ struct {
+ s390_cc_t cond;
+ HReg dst;
+ } cc2bool;
+ struct {
+ HReg op1;
+ s390_amode *op2;
+ HReg op3;
+ HReg old_mem;
+ } cas;
+ struct {
+ IRJumpKind kind;
+ s390_cc_t cond;
+ s390_opnd_RMI dst;
+ } branch;
+ /* Pseudo-insn for representing a helper call.
+ TARGET is the absolute address of the helper function
+ NUM_ARGS says how many arguments are being passed.
+ All arguments have integer type and are being passed according to ABI,
+ i.e. in registers r2, r3, r4, r5, and r6, with argument #0 being
+ passed in r2 and so forth. */
+ struct {
+ s390_cc_t cond;
+ Addr64 target;
+ UInt num_args;
+ HChar *name; /* callee's name (for debugging) */
+ } helper_call;
+ struct {
+ s390_bfp_triop_t tag;
+ s390_round_t rounding_mode;
+ HReg dst; /* first operand */
+ HReg op2; /* second operand */
+ HReg op3; /* third operand */
+ } bfp_triop;
+ struct {
+ s390_bfp_binop_t tag;
+ s390_round_t rounding_mode;
+ HReg dst; /* left operand */
+ HReg op2; /* right operand */
+ } bfp_binop;
+ struct {
+ s390_bfp_unop_t tag;
+ s390_round_t rounding_mode;
+ HReg dst; /* result */
+ HReg op; /* operand */
+ } bfp_unop;
+ struct {
+ s390_bfp_binop_t tag;
+ s390_round_t rounding_mode;
+ HReg dst_hi; /* left operand; high part */
+ HReg dst_lo; /* left operand; low part */
+ HReg op2_hi; /* right operand; high part */
+ HReg op2_lo; /* right operand; low part */
+ } bfp128_binop;
+ /* This variant is also used by the BFP128_CONVERT_TO and
+ BFP128_CONVERT_FROM insns. */
+ struct {
+ s390_bfp_unop_t tag;
+ s390_round_t rounding_mode;
+ HReg dst_hi; /* result; high part */
+ HReg dst_lo; /* result; low part */
+ HReg op_hi; /* operand; high part */
+ HReg op_lo; /* operand; low part */
+ } bfp128_unop;
+ struct {
+ HReg dst; /* condition code in s390 encoding */
+ HReg op1_hi; /* left operand; high part */
+ HReg op1_lo; /* left operand; low part */
+ HReg op2_hi; /* right operand; high part */
+ HReg op2_lo; /* right operand; low part */
+ } bfp128_compare;
+ } variant;
+} s390_insn;
+
+s390_insn *s390_insn_load(UChar size, HReg dst, s390_amode *src);
+s390_insn *s390_insn_store(UChar size, s390_amode *dst, HReg src);
+s390_insn *s390_insn_move(UChar size, HReg dst, HReg src);
+s390_insn *s390_insn_cond_move(UChar size, s390_cc_t cond, HReg dst,
+ s390_opnd_RMI src);
+s390_insn *s390_insn_load_immediate(UChar size, HReg dst, ULong val);
+s390_insn *s390_insn_alu(UChar size, s390_alu_t, HReg dst,
+ s390_opnd_RMI op2);
+s390_insn *s390_insn_mul(UChar size, HReg dst_hi, HReg dst_lo, s390_opnd_RMI op2,
+ Bool signed_multiply);
+s390_insn *s390_insn_div(UChar size, HReg op1_hi, HReg op1_lo, s390_opnd_RMI op2,
+ Bool signed_divide);
+s390_insn *s390_insn_divs(UChar size, HReg rem, HReg op1, s390_opnd_RMI op2);
+s390_insn *s390_insn_flogr(UChar size, HReg bitpos, HReg modval, s390_opnd_RMI op);
+s390_insn *s390_insn_cas(UChar size, HReg op1, s390_amode *op2, HReg op3,
+ HReg old);
+s390_insn *s390_insn_unop(UChar size, s390_unop_t tag, HReg dst,
+ s390_opnd_RMI opnd);
+s390_insn *s390_insn_cc2bool(HReg dst, s390_cc_t src);
+s390_insn *s390_insn_test(UChar size, s390_opnd_RMI src);
+s390_insn *s390_insn_compare(UChar size, HReg dst, s390_opnd_RMI opnd,
+ Bool signed_comparison);
+s390_insn *s390_insn_branch(IRJumpKind jk, s390_cc_t cond, s390_opnd_RMI dst);
+s390_insn *s390_insn_helper_call(s390_cc_t cond, Addr64 target, UInt num_args,
+ HChar *name);
+s390_insn *s390_insn_bfp_triop(UChar size, s390_bfp_triop_t, HReg dst, HReg op2,
+ HReg op3, s390_round_t);
+s390_insn *s390_insn_bfp_binop(UChar size, s390_bfp_binop_t, HReg dst, HReg op2,
+ s390_round_t);
+s390_insn *s390_insn_bfp_unop(UChar size, s390_bfp_unop_t tag, HReg dst,
+ HReg op, s390_round_t);
+s390_insn *s390_insn_bfp_compare(UChar size, HReg dst, HReg op1, HReg op2);
+s390_insn *s390_insn_bfp128_binop(UChar size, s390_bfp_binop_t, HReg dst_hi,
+ HReg dst_lo, HReg op2_hi, HReg op2_lo,
+ s390_round_t);
+/* NOTE(review): this prototype declares an s390_bfp_binop_t tag, but the
+   bfp128_unop insn variant stores an s390_bfp_unop_t (see struct above) --
+   looks like a typo; confirm against the definition in host_s390_insn.c
+   before changing. */
+s390_insn *s390_insn_bfp128_unop(UChar size, s390_bfp_binop_t, HReg dst_hi,
+                                 HReg dst_lo, HReg op_hi, HReg op_lo,
+                                 s390_round_t);
+s390_insn *s390_insn_bfp128_compare(UChar size, HReg dst, HReg op1_hi,
+ HReg op1_lo, HReg op2_hi, HReg op2_lo);
+s390_insn *s390_insn_bfp128_convert_to(UChar size, s390_bfp_unop_t,
+ HReg dst_hi, HReg dst_lo, HReg op);
+s390_insn *s390_insn_bfp128_convert_from(UChar size, s390_bfp_unop_t,
+ HReg dst, HReg op_hi, HReg op_lo,
+ s390_round_t);
+void s390_insn_map_regs(HRegRemap *, s390_insn *);
+Bool s390_insn_is_reg_reg_move(const s390_insn *, HReg *, HReg *);
+void s390_insn_get_reg_usage(HRegUsage *u, const s390_insn *);
+UInt s390_insn_emit(UChar *buf, Int nbuf, const struct s390_insn *insn,
+ void *dispatch);
+
+const HChar *s390_insn_as_string(const s390_insn *);
+
+/* KLUDGE: See detailed comment in host_s390_defs.c. */
+extern const VexArchInfo *s390_archinfo_host;
+
+/* Convenience macros to test installed facilities */
+#define s390_host_has_eimm \
+ (s390_archinfo_host->hwcaps & (VEX_HWCAPS_S390X_EIMM))
+#define s390_host_has_gie \
+ (s390_archinfo_host->hwcaps & (VEX_HWCAPS_S390X_GIE))
+#define s390_host_has_dfp \
+ (s390_archinfo_host->hwcaps & (VEX_HWCAPS_S390X_DFP))
+
+#endif /* ndef __VEX_HOST_S390_INSN_H */
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_insn.h ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_isel.c
+++ valgrind/VEX/priv/host_s390_isel.c
@@ -0,0 +1,2480 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_s390x.h"
+
+#include "ir_match.h"
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_s390_insn.h"
+#include "host_s390_amode.h"
+#include "host_s390_hreg.h"
+#include "host_s390_isel.h"
+
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+ - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+ might encounter. This is computed before insn selection starts,
+ and does not change.
+
+ - A mapping from IRTemp to HReg. This tells the insn selector
+ which virtual register(s) are associated with each IRTemp
+ temporary. This is computed before insn selection starts, and
+ does not change. We expect this mapping to map precisely the
+ same set of IRTemps as the type mapping does.
+
+ - vregmap holds the primary register for the IRTemp.
+ - vregmapHI holds the secondary register for the IRTemp,
+ if any is needed. That's only for Ity_I64 temps
+ in 32 bit mode or Ity_I128 temps in 64-bit mode.
+
+ - The name of the vreg in which we stash a copy of the link reg,
+ so helper functions don't kill it.
+
+ - The code array, that is, the insns selected so far.
+
+ - A counter, for generating new virtual registers.
+
+ - The host subarchitecture we are selecting insns for.
+ This is set at the start and does not change.
+
+ - A Bool to tell us if the host is 32 or 64bit.
+ This is set at the start and does not change.
+*/
+
+typedef struct {
+ IRTypeEnv *type_env;
+
+ HReg *vregmap;
+ HReg *vregmapHI;
+ UInt n_vregmap;
+
+ HReg savedLR;
+
+ HInstrArray *code;
+
+ UInt vreg_ctr;
+
+ UInt hwcaps;
+
+ Bool mode64;
+} ISelEnv;
+
+
+/* Forward declarations */
+static HReg s390_isel_int_expr(ISelEnv *, IRExpr *);
+static s390_amode *s390_isel_amode(ISelEnv *, IRExpr *);
+static s390_cc_t s390_isel_cc(ISelEnv *, IRExpr *);
+static s390_opnd_RMI s390_isel_int_expr_RMI(ISelEnv *, IRExpr *);
+static void s390_isel_int128_expr(HReg *, HReg *, ISelEnv *, IRExpr *);
+static HReg s390_isel_float_expr(ISelEnv *, IRExpr *);
+static void s390_isel_float128_expr(HReg *, HReg *, ISelEnv *, IRExpr *);
+
+
+/* Append INSN to the code array being built, echoing it to the trace
+   output when vcode tracing is enabled */
+static void
+addInstr(ISelEnv *env, s390_insn *insn)
+{
+   addHInstr(env->code, insn);
+
+   if (vex_traceflags & VEX_TRACE_VCODE)
+      vex_printf("%s\n", s390_insn_as_string(insn));
+}
+
+
+/* Build an IR expression for the 64-bit unsigned constant VALUE */
+static __inline__ IRExpr *
+mkU64(ULong value)
+{
+   return IRExpr_Const(IRConst_U64(value));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Registers ---*/
+/*---------------------------------------------------------*/
+
+/* Return the virtual register to which a given IRTemp is mapped. */
+static HReg
+lookupIRTemp(ISelEnv *env, IRTemp tmp)
+{
+   HReg reg;
+
+   vassert(tmp < env->n_vregmap);
+   reg = env->vregmap[tmp];
+   vassert(reg != INVALID_HREG);
+
+   return reg;
+}
+
+
+/* Return the pair of virtual registers to which the 128-bit IRTemp is
+   mapped: *HI from vregmapHI, *LO from vregmap. Only the HI mapping is
+   checked against INVALID_HREG. */
+static void
+lookupIRTemp128(HReg *hi, HReg *lo, ISelEnv *env, IRTemp tmp)
+{
+   vassert(tmp < env->n_vregmap);
+   vassert(env->vregmapHI[tmp] != INVALID_HREG);
+
+   *hi = env->vregmapHI[tmp];
+   *lo = env->vregmap[tmp];
+}
+
+
+/* Allocate a new virtual integer register */
+static HReg
+newVRegI(ISelEnv *env)
+{
+   return mkHReg(env->vreg_ctr++, HRcInt64, True /* virtual */ );
+}
+
+
+/* Allocate a new virtual floating point register */
+static HReg
+newVRegF(ISelEnv *env)
+{
+   return mkHReg(env->vreg_ctr++, HRcFlt64, True /* virtual */ );
+}
+
+
+/* Construct a non-virtual general purpose register.
+   Note: the ENV parameter is unused here -- presumably kept for symmetry
+   with other helpers; confirm before removing. */
+static __inline__ HReg
+make_gpr(ISelEnv *env, UInt regno)
+{
+   return mkHReg(regno, HRcInt64, False /* virtual */ );
+}
+
+
+/* Construct the non-virtual floating point register with number REGNO */
+static __inline__ HReg
+make_fpr(UInt regno)
+{
+   HReg fpr = mkHReg(regno, HRcFlt64, False /* virtual */ );
+
+   return fpr;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Amode ---*/
+/*---------------------------------------------------------*/
+
+static __inline__ Bool
+ulong_fits_unsigned_12bit(ULong val)
+{
+   /* True iff no bit above bit 11 is set */
+   return (val >> 12) == 0;
+}
+
+
+static __inline__ Bool
+uint_fits_unsigned_12bit(UInt val)
+{
+   /* True iff no bit above bit 11 is set */
+   return (val >> 12) == 0;
+}
+
+
+static __inline__ Bool
+ulong_fits_signed_20bit(ULong val)
+{
+   /* Move the low 20 bits to the top, arithmetically shift them back
+      down to sign-extend, and check nothing was lost */
+   Long v = (Long)(val << 44) >> 44;
+
+   return val == (ULong)v;
+}
+
+
+static __inline__ Bool
+uint_fits_signed_20bit(UInt val)
+{
+   /* Move the low 20 bits to the top, arithmetically shift them back
+      down to sign-extend, and check nothing was lost */
+   Int v = (Int)(val << 12) >> 12;
+
+   return val == (UInt)v;
+}
+
+
+/* EXPR is an expression that is used as an address. Return an s390_amode
+   for it. Only the pattern Add64(x, constant) receives special treatment;
+   the constant must fit into an unsigned 12-bit (b12) or signed 20-bit
+   (b20) displacement. Anything else is evaluated into a register which is
+   then used with a zero displacement. */
+static s390_amode *
+s390_isel_amode_wrk(ISelEnv *env, IRExpr *expr)
+{
+   if (expr->tag == Iex_Binop && expr->Iex.Binop.op == Iop_Add64) {
+      IRExpr *arg1 = expr->Iex.Binop.arg1;
+      IRExpr *arg2 = expr->Iex.Binop.arg2;
+
+      /* Move constant into right subtree; OK since Add64 is commutative */
+      if (arg1->tag == Iex_Const) {
+         IRExpr *tmp;
+         tmp = arg1;
+         arg1 = arg2;
+         arg2 = tmp;
+      }
+
+      /* r + constant: Check for b12 first, then b20 */
+      if (arg2->tag == Iex_Const && arg2->Iex.Const.con->tag == Ico_U64) {
+         ULong value = arg2->Iex.Const.con->Ico.U64;
+
+         if (ulong_fits_unsigned_12bit(value)) {
+            return s390_amode_b12((Int)value, s390_isel_int_expr(env, arg1));
+         }
+         if (ulong_fits_signed_20bit(value)) {
+            return s390_amode_b20((Int)value, s390_isel_int_expr(env, arg1));
+         }
+      }
+   }
+
+   /* Doesn't match anything in particular. Generate it into
+      a register and use that. */
+   return s390_amode_b12(0, s390_isel_int_expr(env, expr));
+}
+
+
+/* Wrapper around s390_isel_amode_wrk: checks that EXPR has address type
+   and that the resulting amode is well-formed.
+   BUG FIX: the worker emits instructions into ENV (via s390_isel_int_expr)
+   as a side effect, so it must be invoked exactly once. The previous code
+   called it twice, discarding the first amode and emitting the address
+   computation redundantly. */
+static s390_amode *
+s390_isel_amode(ISelEnv *env, IRExpr *expr)
+{
+   s390_amode *am;
+
+   /* Address computation should yield a 64-bit value */
+   vassert(typeOfIRExpr(env->type_env, expr) == Ity_I64);
+
+   am = s390_isel_amode_wrk(env, expr);
+
+   /* Check post-condition */
+   vassert(s390_amode_is_sane(am));
+
+   return am;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Helper functions ---*/
+/*---------------------------------------------------------*/
+
+/* Constants and memory accesses should be right operands.
+   If LEFT is a constant, a load, or a guest-state read, swap it with
+   RIGHT. Only valid for commutative operations, hence the name. */
+#define order_commutative_operands(left, right) \
+        do { \
+          if (left->tag == Iex_Const || left->tag == Iex_Load || \
+              left->tag == Iex_Get)  { \
+            IRExpr *tmp; \
+            tmp = left; \
+            left = right; \
+            right = tmp; \
+          } \
+        } while (0)
+
+
+/* Build an instruction that copies RMI operand OPND into register DST */
+static s390_insn *
+s390_opnd_copy(UChar size, HReg dst, s390_opnd_RMI opnd)
+{
+   if (opnd.tag == S390_OPND_AMODE)
+      return s390_insn_load(size, dst, opnd.variant.am);
+
+   if (opnd.tag == S390_OPND_REG)
+      return s390_insn_move(size, dst, opnd.variant.reg);
+
+   if (opnd.tag == S390_OPND_IMMEDIATE)
+      return s390_insn_load_immediate(size, dst, opnd.variant.imm);
+
+   vpanic("s390_opnd_copy");
+}
+
+
+/* Wrap register REG into an RMI operand */
+static __inline__ s390_opnd_RMI
+s390_opnd_reg(HReg reg)
+{
+   s390_opnd_RMI opnd;
+
+   opnd.variant.reg = reg;
+   opnd.tag = S390_OPND_REG;
+
+   return opnd;
+}
+
+
+/* Wrap immediate constant VALUE into an RMI operand */
+static __inline__ s390_opnd_RMI
+s390_opnd_imm(ULong value)
+{
+   s390_opnd_RMI opnd;
+
+   opnd.variant.imm = value;
+   opnd.tag = S390_OPND_IMMEDIATE;
+
+   return opnd;
+}
+
+
+/* Return 1, if EXPR represents the constant 0; return 0 otherwise */
+static int
+s390_expr_is_const_zero(IRExpr *expr)
+{
+   if (expr->tag != Iex_Const)
+      return 0;
+
+   switch (expr->Iex.Const.con->tag) {
+   case Ico_U1:  return expr->Iex.Const.con->Ico.U1  == 0;
+   case Ico_U8:  return expr->Iex.Const.con->Ico.U8  == 0;
+   case Ico_U16: return expr->Iex.Const.con->Ico.U16 == 0;
+   case Ico_U32: return expr->Iex.Const.con->Ico.U32 == 0;
+   case Ico_U64: return expr->Iex.Const.con->Ico.U64 == 0;
+   default:
+      vpanic("s390_expr_is_const_zero");
+   }
+}
+
+
+/* Call a helper (clean or dirty)
+   Arguments must satisfy the following conditions:
+   (a) they are expressions yielding an integer result
+   (b) there can be no more than S390_NUM_GPRPARMS arguments
+   GUARD is a Ity_Bit expression indicating whether or not the
+   call happens. If GUARD == NULL, the call is unconditional.
+   If PASSBBP is True, the guest state pointer is passed as a hidden
+   first argument. */
+static void
+doHelperCall(ISelEnv *env, Bool passBBP, IRExpr *guard,
+             IRCallee *callee, IRExpr **args)
+{
+   UInt n_args, i, argreg, size;
+   ULong target;
+   HReg tmpregs[S390_NUM_GPRPARMS];
+   s390_cc_t cc;
+
+   vassert(env->mode64);
+
+   /* Count the arguments; ARGS is NULL-terminated */
+   n_args = 0;
+   for (i = 0; args[i]; i++)
+      ++n_args;
+
+   /* The guest state pointer, if passed, consumes one argument register */
+   if (n_args > (S390_NUM_GPRPARMS - (passBBP ? 1 : 0))) {
+      vpanic("doHelperCall: too many arguments");
+   }
+
+   /* This is the "slow scheme". fixs390: implement the fast one */
+   argreg = 0;
+
+   /* If we need the guest state pointer put it in a temporary arg reg */
+   if (passBBP) {
+      tmpregs[argreg] = newVRegI(env);
+      addInstr(env, s390_insn_move(sizeof(ULong), tmpregs[argreg],
+                                   s390_hreg_guest_state_pointer()));
+      argreg++;
+   }
+
+   /* Compute the function arguments into a temporary register each */
+   for (i = 0; i < n_args; i++) {
+      tmpregs[argreg] = s390_isel_int_expr(env, args[i]);
+      argreg++;
+   }
+
+   /* Compute the condition */
+   cc = S390_CC_ALWAYS;
+   if (guard) {
+      if (guard->tag == Iex_Const
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional -- do nothing */
+      } else {
+         cc = s390_isel_cc(env, guard);
+      }
+   }
+
+   /* Move the args to the final register */
+   for (i = 0; i < argreg; i++) {
+      HReg finalreg;
+
+      finalreg = mkHReg(s390_gprno_from_arg_index(i), HRcInt64, False);
+      size = sizeofIRType(Ity_I64);
+      addInstr(env, s390_insn_move(size, finalreg, tmpregs[i]));
+   }
+
+   target = Ptr_to_ULong(callee->addr);
+
+   /* NOTE(review): when passBBP is True, argreg == n_args + 1 argument
+      registers are loaded above, yet only n_args is recorded in the insn.
+      s390_insn_get_reg_usage marks num_args argument registers as read,
+      so the register carrying the guest state pointer may be
+      under-reported -- confirm. */
+
+   /* Finally, the call itself. */
+   addInstr(env, s390_insn_helper_call(cc, (Addr64)target, n_args,
+                                       callee->name));
+}
+
+
+/* Given an expression representing a rounding mode using IRRoundingMode
+   encoding convert it to an s390_round_t value. Panics when the
+   expression is not an Ico_U32 constant or encodes an unsupported
+   rounding mode. */
+static s390_round_t
+decode_rounding_mode(IRExpr *rounding_expr)
+{
+   if (rounding_expr->tag == Iex_Const &&
+       rounding_expr->Iex.Const.con->tag == Ico_U32) {
+      IRRoundingMode mode = rounding_expr->Iex.Const.con->Ico.U32;
+
+      switch (mode) {
+      case Irrm_CURRENT:       return S390_ROUND_CURRENT;
+      case Irrm_NEAREST_AWAY:  return S390_ROUND_NEAREST_AWAY;
+      case Irrm_NEAREST:       return S390_ROUND_NEAREST_EVEN;
+      case Irrm_ZERO:          return S390_ROUND_ZERO;
+      case Irrm_PosINF:        return S390_ROUND_POSINF;
+      case Irrm_NegINF:        return S390_ROUND_NEGINF;
+      }
+   }
+
+   /* Fall through for a non-constant or unhandled mode. */
+   vpanic("decode_rounding_mode");
+}
+
+
+/* CC_S390 holds the condition code in s390 encoding. Convert it to
+   VEX encoding
+
+   s390     VEX              b6 b2 b0   cc.1  cc.0
+   0      0x40 EQ             1  0  0     0     0
+   1      0x01 LT             0  0  1     0     1
+   2      0x00 GT             0  0  0     1     0
+   3      0x45 Unordered      1  1  1     1     1
+
+   b0 = cc.0
+   b2 = cc.0 & cc.1
+   b6 = ~(cc.0 ^ cc.1)   // ((cc.0 - cc.1) + 0x1 ) & 0x1
+
+   VEX = b0 | (b2 << 2) | (b6 << 6);
+
+   All arithmetic below is done on 32-bit subregisters; the result is a
+   fresh virtual register holding the VEX-encoded condition code. */
+static HReg
+convert_s390_fpcc_to_vex(ISelEnv *env, HReg cc_s390)
+{
+   HReg cc0, cc1, b2, b6, cc_vex;
+
+   /* cc0 = cc & 1  (low bit of the s390 cc) */
+   cc0 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, cc0, cc_s390));
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, cc0, s390_opnd_imm(1)));
+
+   /* cc1 = cc >> 1  (high bit of the s390 cc) */
+   cc1 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, cc1, cc_s390));
+   addInstr(env, s390_insn_alu(4, S390_ALU_RSH, cc1, s390_opnd_imm(1)));
+
+   /* b2 = (cc0 & cc1) << 2 */
+   b2 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, b2, cc0));
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, b2, s390_opnd_reg(cc1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_LSH, b2, s390_opnd_imm(2)));
+
+   /* b6 = (((cc0 - cc1) + 1) & 1) << 6  -- computes ~(cc0 ^ cc1) in bit 0 */
+   b6 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, b6, cc0));
+   addInstr(env, s390_insn_alu(4, S390_ALU_SUB, b6, s390_opnd_reg(cc1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_ADD, b6, s390_opnd_imm(1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, b6, s390_opnd_imm(1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_LSH, b6, s390_opnd_imm(6)));
+
+   /* cc_vex = b0 | b2 | b6   (b0 == cc0) */
+   cc_vex = newVRegI(env);
+   addInstr(env, s390_insn_move(4, cc_vex, cc0));
+   addInstr(env, s390_insn_alu(4, S390_ALU_OR, cc_vex, s390_opnd_reg(b2)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_OR, cc_vex, s390_opnd_reg(b6)));
+
+   return cc_vex;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (128 bit) ---*/
+/*---------------------------------------------------------*/
+/* Compute a 128-bit integer value into a pair of 64-bit registers
+   (*DST_HI, *DST_LO). DO NOT CALL THIS DIRECTLY; use
+   s390_isel_int128_expr which adds sanity checks. */
+static void
+s390_isel_int128_expr_wrk(HReg *dst_hi, HReg *dst_lo, ISelEnv *env,
+                          IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+
+   vassert(ty == Ity_I128);
+
+   /* No need to consider the following
+      - 128-bit constants (they do not exist in VEX)
+      - 128-bit loads from memory (will not be generated)
+   */
+
+   /* Read 128-bit IRTemp */
+   if (expr->tag == Iex_RdTmp) {
+      lookupIRTemp128(dst_hi, dst_lo, env, expr->Iex.RdTmp.tmp);
+      return;
+   }
+
+   if (expr->tag == Iex_Binop) {
+      IRExpr *arg1 = expr->Iex.Binop.arg1;
+      IRExpr *arg2 = expr->Iex.Binop.arg2;
+      Bool is_signed_multiply, is_signed_divide;
+
+      switch (expr->Iex.Binop.op) {
+      case Iop_MullU64:
+         is_signed_multiply = False;
+         goto do_multiply64;
+
+      case Iop_MullS64:
+         is_signed_multiply = True;
+         goto do_multiply64;
+
+      case Iop_DivModU128to64:
+         is_signed_divide = False;
+         goto do_divide64;
+
+      case Iop_DivModS128to64:
+         is_signed_divide = True;
+         goto do_divide64;
+
+      case Iop_64HLto128:
+         *dst_hi = s390_isel_int_expr(env, arg1);
+         *dst_lo = s390_isel_int_expr(env, arg2);
+         return;
+
+      case Iop_DivModS64to64: {
+         HReg r10, r11, h1;
+         s390_opnd_RMI op2;
+
+         h1  = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+         op2 = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+         /* We use non-virtual registers r10 and r11 as pair */
+         r10  = make_gpr(env, 10);
+         r11  = make_gpr(env, 11);
+
+         /* Move 1st operand into r11 and */
+         addInstr(env, s390_insn_move(8, r11, h1));
+
+         /* Divide */
+         addInstr(env, s390_insn_divs(8, r10, r11, op2));
+
+         /* The result is in registers r10 (remainder) and r11 (quotient).
+            Move the result into the reg pair that is being returned such
+            that the low 64 bits are the quotient and the upper 64 bits
+            are the remainder. (see libvex_ir.h). */
+         *dst_hi = newVRegI(env);
+         *dst_lo = newVRegI(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, r10));
+         addInstr(env, s390_insn_move(8, *dst_lo, r11));
+         return;
+      }
+
+      default:
+         break;
+
+      do_multiply64: {
+            HReg r10, r11, h1;
+            s390_opnd_RMI op2;
+
+            order_commutative_operands(arg1, arg2);
+
+            h1  = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+            op2 = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+            /* We use non-virtual registers r10 and r11 as pair */
+            r10  = make_gpr(env, 10);
+            r11  = make_gpr(env, 11);
+
+            /* Move the first operand to r11 */
+            addInstr(env, s390_insn_move(8, r11, h1));
+
+            /* Multiply */
+            addInstr(env, s390_insn_mul(8, r10, r11, op2, is_signed_multiply));
+
+            /* The result is in registers r10 and r11. Assign to two virtual regs
+               and return. */
+            *dst_hi = newVRegI(env);
+            *dst_lo = newVRegI(env);
+            addInstr(env, s390_insn_move(8, *dst_hi, r10));
+            addInstr(env, s390_insn_move(8, *dst_lo, r11));
+            return;
+         }
+
+      do_divide64: {
+            HReg r10, r11, hi, lo;
+            s390_opnd_RMI op2;
+
+            s390_isel_int128_expr(&hi, &lo, env, arg1);
+            op2 = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+            /* We use non-virtual registers r10 and r11 as pair */
+            r10  = make_gpr(env, 10);
+            r11  = make_gpr(env, 11);
+
+            /* Move high 64 bits of the 1st operand into r10 and
+               the low 64 bits into r11. */
+            addInstr(env, s390_insn_move(8, r10, hi));
+            addInstr(env, s390_insn_move(8, r11, lo));
+
+            /* Divide */
+            addInstr(env, s390_insn_div(8, r10, r11, op2, is_signed_divide));
+
+            /* The result is in registers r10 (remainder) and r11 (quotient).
+               Move the result into the reg pair that is being returned such
+               that the low 64 bits are the quotient and the upper 64 bits
+               are the remainder. (see libvex_ir.h). */
+            *dst_hi = newVRegI(env);
+            *dst_lo = newVRegI(env);
+            addInstr(env, s390_insn_move(8, *dst_hi, r10));
+            addInstr(env, s390_insn_move(8, *dst_lo, r11));
+            return;
+         }
+      }
+   }
+
+   vpanic("s390_isel_int128_expr");
+}
+
+
+/* Compute a 128-bit value into two 64-bit registers. These may be either
+   real or virtual regs; in any case they must not be changed by subsequent
+   code emitted by the caller. */
+static void
+s390_isel_int128_expr(HReg *dst_hi, HReg *dst_lo, ISelEnv *env, IRExpr *expr)
+{
+   s390_isel_int128_expr_wrk(dst_hi, dst_lo, env, expr);
+
+   /* Sanity checks ... the workhorse must return virtual 64-bit regs. */
+   vassert(hregIsVirtual(*dst_hi));
+   vassert(hregIsVirtual(*dst_lo));
+   vassert(hregClass(*dst_hi) == HRcInt64);
+   vassert(hregClass(*dst_lo) == HRcInt64);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64/32/16/8 bit) ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+ code list. Return a reg holding the result. This reg will be a
+ virtual register. THE RETURNED REG MUST NOT BE MODIFIED. If you
+ want to modify it, ask for a new vreg, copy it in there, and modify
+ the copy. The register allocator will do its best to map both
+ vregs to the same real register, so the copies will often disappear
+ later in the game.
+
+ This should handle expressions of 64, 32, 16 and 8-bit type.
+ All results are returned in a 64bit register.
+ For 16- and 8-bit expressions, the upper (32/48/56 : 16/24) bits
+ are arbitrary, so you should mask or sign extend partial values
+ if necessary.
+*/
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg
+s390_isel_int_expr_wrk(ISelEnv *env, IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+   UChar size;
+   s390_bfp_unop_t bfpop;
+
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ty == Ity_I64);
+
+   size = sizeofIRType(ty);   /* size of the result after evaluating EXPR */
+
+   switch (expr->tag) {
+
+      /* --------- TEMP --------- */
+   case Iex_RdTmp:
+      /* Return the virtual register that holds the temporary. */
+      return lookupIRTemp(env, expr->Iex.RdTmp.tmp);
+
+      /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg dst = newVRegI(env);
+      s390_amode *am = s390_isel_amode(env, expr->Iex.Load.addr);
+
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+      /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      IRExpr *arg1 = expr->Iex.Binop.arg1;
+      IRExpr *arg2 = expr->Iex.Binop.arg2;
+      HReg h1, res;
+      s390_alu_t opkind;
+      s390_opnd_RMI op2, value, opnd;
+      s390_insn *insn;
+      Bool is_commutative, is_signed_multiply, is_signed_divide;
+
+      is_commutative = True;
+
+      switch (expr->Iex.Binop.op) {
+      case Iop_MullU8:
+      case Iop_MullU16:
+      case Iop_MullU32:
+         is_signed_multiply = False;
+         goto do_multiply;
+
+      case Iop_MullS8:
+      case Iop_MullS16:
+      case Iop_MullS32:
+         is_signed_multiply = True;
+         goto do_multiply;
+
+      do_multiply: {
+            HReg r10, r11;
+            UInt arg_size = size / 2;
+
+            order_commutative_operands(arg1, arg2);
+
+            h1  = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+            op2 = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+            /* We use non-virtual registers r10 and r11 as pair */
+            r10  = make_gpr(env, 10);
+            r11  = make_gpr(env, 11);
+
+            /* Move the first operand to r11 */
+            addInstr(env, s390_insn_move(arg_size, r11, h1));
+
+            /* Multiply */
+            addInstr(env, s390_insn_mul(arg_size, r10, r11, op2, is_signed_multiply));
+
+            /* The result is in registers r10 and r11. Combine them into a SIZE-bit
+               value into the destination register. */
+            res  = newVRegI(env);
+            addInstr(env, s390_insn_move(arg_size, res, r10));
+            value = s390_opnd_imm(arg_size * 8);
+            addInstr(env, s390_insn_alu(size, S390_ALU_LSH, res, value));
+            value = s390_opnd_imm((((ULong)1) << arg_size * 8) - 1);
+            addInstr(env, s390_insn_alu(size, S390_ALU_AND, r11, value));
+            opnd = s390_opnd_reg(r11);
+            addInstr(env, s390_insn_alu(size, S390_ALU_OR, res, opnd));
+            return res;
+         }
+
+      case Iop_DivModS64to32:
+         is_signed_divide = True;
+         goto do_divide;
+
+      case Iop_DivModU64to32:
+         is_signed_divide = False;
+         goto do_divide;
+
+      do_divide: {
+            HReg r10, r11;
+
+            h1  = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+            op2 = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+            /* We use non-virtual registers r10 and r11 as pair */
+            r10  = make_gpr(env, 10);
+            r11  = make_gpr(env, 11);
+
+            /* Split the first operand and put the high 32 bits into r10 and
+               the low 32 bits into r11. */
+            addInstr(env, s390_insn_move(8, r10, h1));
+            addInstr(env, s390_insn_move(8, r11, h1));
+            value = s390_opnd_imm(32);
+            addInstr(env, s390_insn_alu(8, S390_ALU_RSH, r10, value));
+
+            /* Divide */
+            addInstr(env, s390_insn_div(4, r10, r11, op2, is_signed_divide));
+
+            /* The result is in registers r10 (remainder) and r11 (quotient).
+               Combine them into a 64-bit value such that the low 32 bits are
+               the quotient and the upper 32 bits are the remainder. (see
+               libvex_ir.h). */
+            res  = newVRegI(env);
+            addInstr(env, s390_insn_move(8, res, r10));
+            value = s390_opnd_imm(32);
+            addInstr(env, s390_insn_alu(8, S390_ALU_LSH, res, value));
+            value = s390_opnd_imm((((ULong)1) << 32) - 1);
+            addInstr(env, s390_insn_alu(8, S390_ALU_AND, r11, value));
+            opnd = s390_opnd_reg(r11);
+            addInstr(env, s390_insn_alu(8, S390_ALU_OR, res, opnd));
+            return res;
+         }
+
+      case Iop_F32toI32S:  bfpop = S390_BFP_F32_TO_I32;  goto do_convert;
+      case Iop_F32toI64S:  bfpop = S390_BFP_F32_TO_I64;  goto do_convert;
+      case Iop_F64toI32S:  bfpop = S390_BFP_F64_TO_I32;  goto do_convert;
+      case Iop_F64toI64S:  bfpop = S390_BFP_F64_TO_I64;  goto do_convert;
+      case Iop_F128toI32S: bfpop = S390_BFP_F128_TO_I32; goto do_convert_128;
+      case Iop_F128toI64S: bfpop = S390_BFP_F128_TO_I64; goto do_convert_128;
+
+      do_convert: {
+            s390_round_t rounding_mode;
+
+            res = newVRegI(env);
+            h1  = s390_isel_float_expr(env, arg2);   /* Process operand */
+
+            rounding_mode = decode_rounding_mode(arg1);
+            addInstr(env, s390_insn_bfp_unop(size, bfpop, res, h1, rounding_mode));
+            return res;
+         }
+
+      do_convert_128: {
+            s390_round_t rounding_mode;
+            HReg op_hi, op_lo, f13, f15;
+
+            res = newVRegI(env);
+            s390_isel_float128_expr(&op_hi, &op_lo, env, arg2);   /* operand */
+
+            /* We use non-virtual registers r13 and r15 as pair */
+            f13 = make_fpr(13);
+            f15 = make_fpr(15);
+
+            /* operand --> (f13, f15) */
+            addInstr(env, s390_insn_move(8, f13, op_hi));
+            addInstr(env, s390_insn_move(8, f15, op_lo));
+
+            rounding_mode = decode_rounding_mode(arg1);
+            addInstr(env, s390_insn_bfp128_convert_from(size, bfpop, res, f13, f15,
+                                                        rounding_mode));
+            return res;
+         }
+
+      case Iop_8HLto16:
+      case Iop_16HLto32:
+      case Iop_32HLto64: {
+         HReg h2;
+         UInt arg_size = size / 2;
+
+         res  = newVRegI(env);
+         h1   = s390_isel_int_expr(env, arg1);   /* Process 1st operand */
+         h2   = s390_isel_int_expr(env, arg2);   /* Process 2nd operand */
+
+         addInstr(env, s390_insn_move(arg_size, res, h1));
+         value = s390_opnd_imm(arg_size * 8);
+         addInstr(env, s390_insn_alu(size, S390_ALU_LSH, res, value));
+         value = s390_opnd_imm((((ULong)1) << arg_size * 8) - 1);
+         addInstr(env, s390_insn_alu(size, S390_ALU_AND, h2, value));
+         opnd = s390_opnd_reg(h2);
+         addInstr(env, s390_insn_alu(size, S390_ALU_OR, res, opnd));
+         return res;
+      }
+
+      case Iop_Max32U: {
+         /* arg1 > arg2 ? arg1 : arg2   using uint32_t arguments */
+         res = newVRegI(env);
+         h1  = s390_isel_int_expr(env, arg1);
+         op2 = s390_isel_int_expr_RMI(env, arg2);
+
+         addInstr(env, s390_insn_move(size, res, h1));
+         addInstr(env, s390_insn_compare(size, res, op2, False /* unsigned */));
+         addInstr(env, s390_insn_cond_move(size, S390_CC_L, res, op2));
+         return res;
+      }
+
+      case Iop_CmpF32:
+      case Iop_CmpF64: {
+         HReg cc_s390, h2;
+
+         h1 = s390_isel_float_expr(env, arg1);
+         h2 = s390_isel_float_expr(env, arg2);
+         cc_s390 = newVRegI(env);
+
+         size = (expr->Iex.Binop.op == Iop_CmpF32) ? 4 : 8;
+
+         addInstr(env, s390_insn_bfp_compare(size, cc_s390, h1, h2));
+
+         return convert_s390_fpcc_to_vex(env, cc_s390);
+      }
+
+      case Iop_CmpF128: {
+         HReg op1_hi, op1_lo, op2_hi, op2_lo, f12, f13, f14, f15, cc_s390;
+
+         s390_isel_float128_expr(&op1_hi, &op1_lo, env, arg1); /* 1st operand */
+         s390_isel_float128_expr(&op2_hi, &op2_lo, env, arg2); /* 2nd operand */
+         cc_s390 = newVRegI(env);
+
+         /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+         f12 = make_fpr(12);
+         f13 = make_fpr(13);
+         f14 = make_fpr(14);
+         f15 = make_fpr(15);
+
+         /* 1st operand --> (f12, f14) */
+         addInstr(env, s390_insn_move(8, f12, op1_hi));
+         addInstr(env, s390_insn_move(8, f14, op1_lo));
+
+         /* 2nd operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op2_hi));
+         addInstr(env, s390_insn_move(8, f15, op2_lo));
+
+         addInstr(env, s390_insn_bfp128_compare(16, cc_s390, f12, f14, f13, f15));
+
+         return convert_s390_fpcc_to_vex(env, cc_s390);
+      }
+
+      case Iop_Add8:
+      case Iop_Add16:
+      case Iop_Add32:
+      case Iop_Add64:
+         opkind = S390_ALU_ADD;
+         break;
+
+      case Iop_Sub8:
+      case Iop_Sub16:
+      case Iop_Sub32:
+      case Iop_Sub64:
+         opkind = S390_ALU_SUB;
+         is_commutative = False;
+         break;
+
+      case Iop_And8:
+      case Iop_And16:
+      case Iop_And32:
+      case Iop_And64:
+         opkind = S390_ALU_AND;
+         break;
+
+      case Iop_Or8:
+      case Iop_Or16:
+      case Iop_Or32:
+      case Iop_Or64:
+         opkind = S390_ALU_OR;
+         break;
+
+      case Iop_Xor8:
+      case Iop_Xor16:
+      case Iop_Xor32:
+      case Iop_Xor64:
+         opkind = S390_ALU_XOR;
+         break;
+
+      case Iop_Shl8:
+      case Iop_Shl16:
+      case Iop_Shl32:
+      case Iop_Shl64:
+         opkind = S390_ALU_LSH;
+         is_commutative = False;
+         break;
+
+      case Iop_Shr8:
+      case Iop_Shr16:
+      case Iop_Shr32:
+      case Iop_Shr64:
+         opkind = S390_ALU_RSH;
+         is_commutative = False;
+         break;
+
+      case Iop_Sar8:
+      case Iop_Sar16:
+      case Iop_Sar32:
+      case Iop_Sar64:
+         opkind = S390_ALU_RSHA;
+         is_commutative = False;
+         break;
+
+      default:
+         goto irreducible;
+      }
+
+      /* Pattern match: 0 - arg1  -->  -arg1   */
+      if (opkind == S390_ALU_SUB && s390_expr_is_const_zero(arg1)) {
+         res  = newVRegI(env);
+         op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+         insn = s390_insn_unop(size, S390_NEGATE, res, op2);
+         addInstr(env, insn);
+
+         return res;
+      }
+
+      if (is_commutative) {
+         order_commutative_operands(arg1, arg2);
+      }
+
+      h1   = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+      op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+      res  = newVRegI(env);
+      addInstr(env, s390_insn_move(size, res, h1));
+      insn = s390_insn_alu(size, opkind, res, op2);
+
+      addInstr(env, insn);
+
+      return res;
+   }
+
+      /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      static s390_opnd_RMI mask  = { S390_OPND_IMMEDIATE };
+      static s390_opnd_RMI shift = { S390_OPND_IMMEDIATE };
+      s390_opnd_RMI opnd;
+      s390_insn    *insn;
+      IRExpr *arg;
+      HReg    dst, h1;
+      IROp    unop, binop;
+
+      arg = expr->Iex.Unop.arg;
+
+      /* Special cases are handled here */
+
+      /* 32-bit multiply with 32-bit result or
+         64-bit multiply with 64-bit result.
+         Only read the Binop union member after checking the tag; reading
+         Iex.Binop.op of a non-Binop node accesses the wrong union variant. */
+      unop  = expr->Iex.Unop.op;
+
+      if (arg->tag == Iex_Binop) {
+         binop = arg->Iex.Binop.op;
+         if ((unop == Iop_64to32 &&
+              (binop == Iop_MullS32 || binop == Iop_MullU32)) ||
+             (unop == Iop_128to64 &&
+              (binop == Iop_MullS64 || binop == Iop_MullU64))) {
+            h1   = s390_isel_int_expr(env, arg->Iex.Binop.arg1);     /* 1st opnd */
+            opnd = s390_isel_int_expr_RMI(env, arg->Iex.Binop.arg2); /* 2nd opnd */
+            dst  = newVRegI(env);     /* Result goes into a new register */
+            addInstr(env, s390_insn_move(size, dst, h1));
+            addInstr(env, s390_insn_alu(size, S390_ALU_MUL, dst, opnd));
+
+            return dst;
+         }
+      }
+
+      if (unop == Iop_ReinterpF64asI64) {
+         dst = newVRegI(env);
+         h1  = s390_isel_float_expr(env, arg);     /* Process the operand */
+         addInstr(env, s390_insn_move(size, dst, h1));
+
+         return dst;
+      }
+
+      /* Expressions whose argument is 1-bit wide */
+      if (typeOfIRExpr(env->type_env, arg) == Ity_I1) {
+         s390_cc_t cond = s390_isel_cc(env, arg);
+         dst = newVRegI(env);     /* Result goes into a new register */
+         addInstr(env, s390_insn_cc2bool(dst, cond));
+
+         switch (unop) {
+         case Iop_1Uto8:
+         case Iop_1Uto32:
+         case Iop_1Uto64:
+            /* Nothing to do */
+            break;
+
+         case Iop_1Sto8:
+         case Iop_1Sto16:
+         case Iop_1Sto32:
+            shift.variant.imm = 31;
+            addInstr(env, s390_insn_alu(4, S390_ALU_LSH,  dst, shift));
+            addInstr(env, s390_insn_alu(4, S390_ALU_RSHA, dst, shift));
+            break;
+
+         case Iop_1Sto64:
+            shift.variant.imm = 63;
+            addInstr(env, s390_insn_alu(8, S390_ALU_LSH,  dst, shift));
+            addInstr(env, s390_insn_alu(8, S390_ALU_RSHA, dst, shift));
+            break;
+
+         default:
+            goto irreducible;
+         }
+
+         return dst;
+      }
+
+      /* Regular processing */
+
+      if (unop == Iop_128to64) {
+         HReg dst_hi, dst_lo;
+
+         s390_isel_int128_expr(&dst_hi, &dst_lo, env, arg);
+         return dst_lo;
+      }
+
+      if (unop == Iop_128HIto64) {
+         HReg dst_hi, dst_lo;
+
+         s390_isel_int128_expr(&dst_hi, &dst_lo, env, arg);
+         return dst_hi;
+      }
+
+      dst  = newVRegI(env);     /* Result goes into a new register */
+      opnd = s390_isel_int_expr_RMI(env, arg);     /* Process the operand */
+
+      switch (unop) {
+      case Iop_8Uto16:
+      case Iop_8Uto32:
+      case Iop_8Uto64:
+         insn = s390_insn_unop(size, S390_ZERO_EXTEND_8, dst, opnd);
+         break;
+
+      case Iop_16Uto32:
+      case Iop_16Uto64:
+         insn = s390_insn_unop(size, S390_ZERO_EXTEND_16, dst, opnd);
+         break;
+
+      case Iop_32Uto64:
+         insn = s390_insn_unop(size, S390_ZERO_EXTEND_32, dst, opnd);
+         break;
+
+      case Iop_8Sto16:
+      case Iop_8Sto32:
+      case Iop_8Sto64:
+         insn = s390_insn_unop(size, S390_SIGN_EXTEND_8, dst, opnd);
+         break;
+
+      case Iop_16Sto32:
+      case Iop_16Sto64:
+         insn = s390_insn_unop(size, S390_SIGN_EXTEND_16, dst, opnd);
+         break;
+
+      case Iop_32Sto64:
+         insn = s390_insn_unop(size, S390_SIGN_EXTEND_32, dst, opnd);
+         break;
+
+      case Iop_64to8:
+      case Iop_64to16:
+      case Iop_64to32:
+      case Iop_32to8:
+      case Iop_32to16:
+      case Iop_16to8:
+         /* Down-casts are no-ops. Upstream operations will only look at
+            the bytes that make up the result of the down-cast. So there
+            is no point setting the other bytes to 0. */
+         insn = s390_opnd_copy(8, dst, opnd);
+         break;
+
+      case Iop_64HIto32:
+         addInstr(env, s390_opnd_copy(8, dst, opnd));
+         shift.variant.imm = 32;
+         insn = s390_insn_alu(8, S390_ALU_RSH, dst, shift);
+         break;
+
+      case Iop_32HIto16:
+         addInstr(env, s390_opnd_copy(4, dst, opnd));
+         shift.variant.imm = 16;
+         insn = s390_insn_alu(4, S390_ALU_RSH, dst, shift);
+         break;
+
+      case Iop_16HIto8:
+         addInstr(env, s390_opnd_copy(2, dst, opnd));
+         shift.variant.imm = 8;
+         insn = s390_insn_alu(2, S390_ALU_RSH, dst, shift);
+         break;
+
+      case Iop_Not8:
+      case Iop_Not16:
+      case Iop_Not32:
+      case Iop_Not64:
+         /* XOR with ffff... */
+         mask.variant.imm = ~(ULong)0;
+         addInstr(env, s390_opnd_copy(size, dst, opnd));
+         insn = s390_insn_alu(size, S390_ALU_XOR, dst, mask);
+         break;
+
+      case Iop_Left8:
+      case Iop_Left16:
+      case Iop_Left32:
+      case Iop_Left64:
+         addInstr(env, s390_insn_unop(size, S390_NEGATE, dst, opnd));
+         insn = s390_insn_alu(size, S390_ALU_OR, dst, opnd);
+         break;
+
+      case Iop_CmpwNEZ32:
+      case Iop_CmpwNEZ64: {
+         /* Use the fact that x | -x == 0 iff x == 0. Otherwise, either X
+            or -X will have a 1 in the MSB. */
+         addInstr(env, s390_insn_unop(size, S390_NEGATE, dst, opnd));
+         addInstr(env, s390_insn_alu(size, S390_ALU_OR,  dst, opnd));
+         shift.variant.imm = (unop == Iop_CmpwNEZ32) ? 31 : 63;
+         addInstr(env, s390_insn_alu(size, S390_ALU_RSHA, dst, shift));
+         return dst;
+      }
+
+      case Iop_Clz64: {
+         HReg r10, r11;
+
+         /* We use non-virtual registers r10 and r11 as pair for the two
+            output values */
+         r10 = make_gpr(env, 10);
+         r11 = make_gpr(env, 11);
+
+         /* flogr */
+         addInstr(env, s390_insn_flogr(8, r10, r11, opnd));
+
+         /* The result is in registers r10 (bit position) and r11 (modified
+            input value). The value in r11 is not needed and will be
+            discarded. */
+         addInstr(env, s390_insn_move(8, dst, r10));
+         return dst;
+      }
+
+      default:
+         goto irreducible;
+      }
+
+      addInstr(env, insn);
+
+      return dst;
+   }
+
+      /* --------- GET --------- */
+   case Iex_Get: {
+      HReg dst = newVRegI(env);
+      s390_amode *am = s390_amode_b12(expr->Iex.Get.offset,
+                                      s390_hreg_guest_state_pointer());
+
+      /* We never load more than 8 bytes from the guest state, because the
+         floating point register pair is not contiguous. */
+      vassert(size <= 8);
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+   case Iex_GetI:
+      /* not needed */
+      break;
+
+      /* --------- CCALL --------- */
+   case Iex_CCall: {
+      HReg dst = newVRegI(env);
+
+      doHelperCall(env, False, NULL, expr->Iex.CCall.cee,
+                   expr->Iex.CCall.args);
+
+      /* Move the returned value into the return register */
+      addInstr(env, s390_insn_move(sizeofIRType(expr->Iex.CCall.retty), dst,
+                                   mkHReg(S390_REGNO_RETURN_VALUE,
+                                          HRcInt64, False)));
+      return dst;
+   }
+
+      /* --------- LITERAL --------- */
+
+      /* Load a literal into a register. Create a "load immediate"
+         v-insn and return the register. */
+   case Iex_Const: {
+      ULong value;
+      HReg  dst = newVRegI(env);
+      const IRConst *con = expr->Iex.Const.con;
+
+      /* Bitwise copy of the value. No sign/zero-extension */
+      switch (con->tag) {
+      case Ico_U64: value = con->Ico.U64; break;
+      case Ico_U32: value = con->Ico.U32; break;
+      case Ico_U16: value = con->Ico.U16; break;
+      case Ico_U8:  value = con->Ico.U8;  break;
+      default:      vpanic("s390_isel_int_expr: invalid constant");
+      }
+
+      addInstr(env, s390_insn_load_immediate(size, dst, value));
+
+      return dst;
+   }
+
+      /* --------- MULTIPLEX --------- */
+   case Iex_Mux0X: {
+      IRExpr *cond_expr;
+      HReg dst, tmp, rX;
+      s390_opnd_RMI cond, r0, zero;
+
+      cond_expr = expr->Iex.Mux0X.cond;
+
+      dst  = newVRegI(env);
+      r0   = s390_isel_int_expr_RMI(env, expr->Iex.Mux0X.expr0);
+      rX   = s390_isel_int_expr(env, expr->Iex.Mux0X.exprX);
+      size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.Mux0X.exprX));
+
+      if (cond_expr->tag == Iex_Unop && cond_expr->Iex.Unop.op == Iop_1Uto8) {
+         s390_cc_t cc = s390_isel_cc(env, cond_expr->Iex.Unop.arg);
+
+         addInstr(env, s390_insn_move(size, dst, rX));
+         addInstr(env, s390_insn_cond_move(size, s390_cc_invert(cc), dst, r0));
+         return dst;
+      }
+
+      /* Assume the condition is true and move rX to the destination reg. */
+      addInstr(env, s390_insn_move(size, dst, rX));
+
+      /* Compute the condition ... */
+      cond = s390_isel_int_expr_RMI(env, cond_expr);
+
+      /* tmp = cond & 0xFF */
+      tmp = newVRegI(env);
+      addInstr(env, s390_insn_load_immediate(4, tmp, 0xFF));
+      addInstr(env, s390_insn_alu(4, S390_ALU_AND, tmp, cond));
+
+      /* ... and compare it with zero */
+      zero = s390_opnd_imm(0);
+      addInstr(env, s390_insn_compare(4, tmp, zero, 0 /* unsigned */));
+
+      /* ... and if it compared equal move r0 to the destination reg. */
+      size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.Mux0X.expr0));
+      addInstr(env, s390_insn_cond_move(size, S390_CC_E, dst, r0));
+
+      return dst;
+   }
+
+   default:
+      break;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_int_expr: cannot reduce tree");
+}
+
+
+/* Select insns for an integer expression and return the virtual 64-bit
+   register holding the result. THE RETURNED REG MUST NOT BE MODIFIED. */
+static HReg
+s390_isel_int_expr(ISelEnv *env, IRExpr *expr)
+{
+   HReg result = s390_isel_int_expr_wrk(env, expr);
+
+   /* The workhorse must hand back a virtual 64-bit integer register. */
+   vassert(hregIsVirtual(result));
+   vassert(hregClass(result) == HRcInt64);
+
+   return result;
+}
+
+
+/* Select insns for an integer expression and return it as a
+   register/memory/immediate operand. Loads and Gets become address modes,
+   constants become immediates, and anything else is evaluated into a
+   register. */
+static s390_opnd_RMI
+s390_isel_int_expr_RMI(ISelEnv *env, IRExpr *expr)
+{
+   s390_opnd_RMI result;
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 ||
+           ty == Ity_I64);
+
+   switch (expr->tag) {
+   case Iex_Load:
+      result.tag = S390_OPND_AMODE;
+      result.variant.am = s390_isel_amode(env, expr->Iex.Load.addr);
+      break;
+
+   case Iex_Get:
+      result.tag = S390_OPND_AMODE;
+      result.variant.am = s390_amode_b12(expr->Iex.Get.offset,
+                                         s390_hreg_guest_state_pointer());
+      break;
+
+   case Iex_Const: {
+      ULong value;
+
+      /* The bit pattern for the value will be stored as is in the least
+         significant bits of VALUE. */
+      switch (expr->Iex.Const.con->tag) {
+      case Ico_U1:  value = expr->Iex.Const.con->Ico.U1;  break;
+      case Ico_U8:  value = expr->Iex.Const.con->Ico.U8;  break;
+      case Ico_U16: value = expr->Iex.Const.con->Ico.U16; break;
+      case Ico_U32: value = expr->Iex.Const.con->Ico.U32; break;
+      case Ico_U64: value = expr->Iex.Const.con->Ico.U64; break;
+      default:
+         vpanic("s390_isel_int_expr_RMI");
+      }
+
+      result.tag = S390_OPND_IMMEDIATE;
+      result.variant.imm = value;
+      break;
+   }
+
+   default:
+      result.tag = S390_OPND_REG;
+      result.variant.reg = s390_isel_int_expr(env, expr);
+      break;
+   }
+
+   return result;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (128 bit) ---*/
+/*---------------------------------------------------------*/
+/* Compute a 128-bit float value into a pair of 64-bit FP registers
+   (*DST_HI, *DST_LO). DO NOT CALL THIS DIRECTLY; use
+   s390_isel_float128_expr instead. */
+static void
+s390_isel_float128_expr_wrk(HReg *dst_hi, HReg *dst_lo, ISelEnv *env,
+                            IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+
+   vassert(ty == Ity_F128);
+
+   switch (expr->tag) {
+      /* --------- TEMP --------- */
+   case Iex_RdTmp:
+      /* Return the virtual registers that hold the temporary. */
+      lookupIRTemp128(dst_hi, dst_lo, env, expr->Iex.RdTmp.tmp);
+      return;
+
+      /* --------- LOAD --------- */
+   case Iex_Load: {
+      IRExpr *addr_hi, *addr_lo;
+      s390_amode *am_hi, *am_lo;
+
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addr_hi = expr->Iex.Load.addr;
+      addr_lo = IRExpr_Binop(Iop_Add64, addr_hi, mkU64(8));
+
+      am_hi  = s390_isel_amode(env, addr_hi);
+      am_lo  = s390_isel_amode(env, addr_lo);
+
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_load(8, *dst_hi, am_hi));
+      /* Fixed: the low half must go into *DST_LO, not *DST_HI. */
+      addInstr(env, s390_insn_load(8, *dst_lo, am_lo));
+      return;
+   }
+
+
+      /* --------- GET --------- */
+   case Iex_Get:
+      /* This is not supported because loading 128-bit from the guest
+         state is almost certainly wrong. Use get_fpr_pair instead. */
+      vpanic("Iex_Get with F128 data");
+
+      /* --------- 4-ary OP --------- */
+   case Iex_Qop:
+      vpanic("Iex_Qop with F128 data");
+
+      /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IROp    op    = expr->Iex.Triop.op;
+      IRExpr *left  = expr->Iex.Triop.arg2;
+      IRExpr *right = expr->Iex.Triop.arg3;
+      s390_bfp_binop_t bfpop;
+      s390_round_t rounding_mode;
+      HReg op1_hi, op1_lo, op2_hi, op2_lo, f12, f13, f14, f15;
+
+      s390_isel_float128_expr(&op1_hi, &op1_lo, env, left);  /* 1st operand */
+      s390_isel_float128_expr(&op2_hi, &op2_lo, env, right); /* 2nd operand */
+
+      /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+      f12 = make_fpr(12);
+      f13 = make_fpr(13);
+      f14 = make_fpr(14);
+      f15 = make_fpr(15);
+
+      /* 1st operand --> (f12, f14) */
+      addInstr(env, s390_insn_move(8, f12, op1_hi));
+      addInstr(env, s390_insn_move(8, f14, op1_lo));
+
+      /* 2nd operand --> (f13, f15) */
+      addInstr(env, s390_insn_move(8, f13, op2_hi));
+      addInstr(env, s390_insn_move(8, f15, op2_lo));
+
+      switch (op) {
+      case Iop_AddF128: bfpop = S390_BFP_ADD; break;
+      case Iop_SubF128: bfpop = S390_BFP_SUB; break;
+      case Iop_MulF128: bfpop = S390_BFP_MUL; break;
+      case Iop_DivF128: bfpop = S390_BFP_DIV; break;
+      default:
+         goto irreducible;
+      }
+
+      rounding_mode = decode_rounding_mode(expr->Iex.Triop.arg1);
+      addInstr(env, s390_insn_bfp128_binop(16, bfpop, f12, f14, f13,
+                                           f15, rounding_mode));
+
+      /* Move result to virtual destination register */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f12));
+      addInstr(env, s390_insn_move(8, *dst_lo, f14));
+
+      return;
+   }
+
+      /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      HReg op_hi, op_lo, f12, f13, f14, f15;
+      s390_bfp_binop_t bfpop;
+      s390_round_t rounding_mode;
+
+      /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+      f12 = make_fpr(12);
+      f13 = make_fpr(13);
+      f14 = make_fpr(14);
+      f15 = make_fpr(15);
+
+      switch (expr->Iex.Binop.op) {
+      case Iop_SqrtF128:
+         s390_isel_float128_expr(&op_hi, &op_lo, env, expr->Iex.Binop.arg2);
+
+         /* operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op_hi));
+         addInstr(env, s390_insn_move(8, f15, op_lo));
+
+         bfpop = S390_BFP_SQRT;
+         rounding_mode = decode_rounding_mode(expr->Iex.Binop.arg1);
+
+         addInstr(env, s390_insn_bfp128_unop(16, bfpop, f12, f14, f13, f15,
+                                             rounding_mode));
+
+         /* Move result to virtual destination registers */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f12));
+         addInstr(env, s390_insn_move(8, *dst_lo, f14));
+         return;
+
+      case Iop_F64HLto128:
+         *dst_hi = s390_isel_float_expr(env, expr->Iex.Binop.arg1);
+         *dst_lo = s390_isel_float_expr(env, expr->Iex.Binop.arg2);
+         return;
+
+      default:
+         goto irreducible;
+      }
+   }
+
+      /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      /* Fixed: a unary expression's operand and opcode live in Iex.Unop,
+         not Iex.Binop. */
+      IRExpr *left = expr->Iex.Unop.arg;
+      s390_bfp_unop_t bfpop;
+      s390_round_t rounding_mode;
+      HReg op_hi, op_lo, op, f12, f13, f14, f15;
+
+      /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+      f12 = make_fpr(12);
+      f13 = make_fpr(13);
+      f14 = make_fpr(14);
+      f15 = make_fpr(15);
+
+      switch (expr->Iex.Unop.op) {
+      case Iop_NegF128:    bfpop = S390_BFP_NEG;         goto float128_opnd;
+      case Iop_AbsF128:    bfpop = S390_BFP_ABS;         goto float128_opnd;
+      case Iop_I32StoF128: bfpop = S390_BFP_I32_TO_F128; goto convert_int;
+      case Iop_I64StoF128: bfpop = S390_BFP_I64_TO_F128; goto convert_int;
+      case Iop_F32toF128:  bfpop = S390_BFP_F32_TO_F128; goto convert_float;
+      case Iop_F64toF128:  bfpop = S390_BFP_F64_TO_F128; goto convert_float;
+      default:
+         goto irreducible;
+      }
+
+   float128_opnd:
+      s390_isel_float128_expr(&op_hi, &op_lo, env, left);
+
+      /* operand --> (f13, f15) */
+      addInstr(env, s390_insn_move(8, f13, op_hi));
+      addInstr(env, s390_insn_move(8, f15, op_lo));
+
+      rounding_mode = S390_ROUND_CURRENT;  /* will not be used later on */
+      addInstr(env, s390_insn_bfp128_unop(16, bfpop, f12, f14, f13, f15,
+                                          rounding_mode));
+      goto move_dst;
+
+   convert_float:
+      op = s390_isel_float_expr(env, left);
+      addInstr(env, s390_insn_bfp128_convert_to(16, bfpop, f12, f14,
+                                                op));
+      goto move_dst;
+
+   convert_int:
+      op = s390_isel_int_expr(env, left);
+      addInstr(env, s390_insn_bfp128_convert_to(16, bfpop, f12, f14,
+                                                op));
+      goto move_dst;
+
+   move_dst:
+      /* Move result to virtual destination registers */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f12));
+      addInstr(env, s390_insn_move(8, *dst_lo, f14));
+      return;
+   }
+
+   default:
+      goto irreducible;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_float128_expr: cannot reduce tree");
+}
+
+/* Compute a 128-bit floating point value into two 64-bit virtual FP
+   registers (high and low halves). The registers must not be changed by
+   subsequent code emitted by the caller. */
+/* Wrapper around s390_isel_float128_expr_wrk: selects insns for EXPR and
+   returns the register pair holding the result in (*DST_HI, *DST_LO). */
+static void
+s390_isel_float128_expr(HReg *dst_hi, HReg *dst_lo, ISelEnv *env, IRExpr *expr)
+{
+   s390_isel_float128_expr_wrk(dst_hi, dst_lo, env, expr);
+
+   /* Sanity checks: both halves must be virtual 64-bit FP registers. */
+   vassert(hregIsVirtual(*dst_hi));
+   vassert(hregIsVirtual(*dst_lo));
+   vassert(hregClass(*dst_hi) == HRcFlt64);
+   vassert(hregClass(*dst_lo) == HRcFlt64);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (64 bit) ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 32/64-bit floating point value into an FP register.
+   Worker function; the returned register is checked by the wrapper
+   s390_isel_float_expr below. Panics on expressions it cannot handle. */
+static HReg
+s390_isel_float_expr_wrk(ISelEnv *env, IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+   UChar size;
+
+   vassert(ty == Ity_F32 || ty == Ity_F64);
+
+   size = sizeofIRType(ty);
+
+   switch (expr->tag) {
+   case Iex_RdTmp:
+      /* Return the virtual register that holds the temporary. */
+      return lookupIRTemp(env, expr->Iex.RdTmp.tmp);
+
+   /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg        dst = newVRegF(env);
+      s390_amode *am  = s390_isel_amode(env, expr->Iex.Load.addr);
+
+      /* Only big-endian loads are supported on this host. */
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+   /* --------- GET --------- */
+   case Iex_Get: {
+      /* Read a value from the guest state at the given offset. */
+      HReg dst = newVRegF(env);
+      s390_amode *am = s390_amode_b12(expr->Iex.Get.offset,
+                                      s390_hreg_guest_state_pointer());
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+   /* --------- LITERAL --------- */
+
+   /* Load a literal into a register. Create a "load immediate"
+      v-insn and return the register. */
+   case Iex_Const: {
+      ULong value;
+      HReg  dst = newVRegF(env);
+      const IRConst *con = expr->Iex.Const.con;
+
+      /* Bitwise copy of the value. No sign/zero-extension */
+      switch (con->tag) {
+      case Ico_F32i: value = con->Ico.F32i; break;
+      case Ico_F64i: value = con->Ico.F64i; break;
+      default:       vpanic("s390_isel_float_expr: invalid constant");
+      }
+
+      /* Only the constant 0 can be loaded as an immediate. */
+      if (value != 0) vpanic("cannot load immediate floating point constant");
+
+      addInstr(env, s390_insn_load_immediate(size, dst, value));
+
+      return dst;
+   }
+
+   /* --------- 4-ary OP --------- */
+   case Iex_Qop: {
+      /* Fused multiply-add / multiply-subtract. */
+      HReg op1, op2, op3, dst;
+      s390_bfp_triop_t bfpop;
+      s390_round_t rounding_mode;
+
+      op1 = s390_isel_float_expr(env, expr->Iex.Qop.arg2);
+      op2 = s390_isel_float_expr(env, expr->Iex.Qop.arg3);
+      op3 = s390_isel_float_expr(env, expr->Iex.Qop.arg4);
+      dst = newVRegF(env);
+      /* NOTE(review): dst is pre-loaded with arg2; presumably the triop
+         computes dst := op2 * op3 +/- dst -- confirm in s390_insn_bfp_triop */
+      addInstr(env, s390_insn_move(size, dst, op1));
+
+      switch (expr->Iex.Qop.op) {
+      case Iop_MAddF32:
+      case Iop_MAddF64:  bfpop = S390_BFP_MADD; break;
+      case Iop_MSubF32:
+      case Iop_MSubF64:  bfpop = S390_BFP_MSUB; break;
+
+      default:
+         goto irreducible;
+      }
+
+      rounding_mode = decode_rounding_mode(expr->Iex.Qop.arg1);
+      addInstr(env, s390_insn_bfp_triop(size, bfpop, dst, op2, op3,
+                                        rounding_mode));
+      return dst;
+   }
+
+   /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IROp    op    = expr->Iex.Triop.op;
+      IRExpr *left  = expr->Iex.Triop.arg2;
+      IRExpr *right = expr->Iex.Triop.arg3;
+      s390_bfp_binop_t bfpop;
+      s390_round_t rounding_mode;
+      HReg h1, op2, dst;
+
+      h1   = s390_isel_float_expr(env, left);  /* Process 1st operand */
+      op2  = s390_isel_float_expr(env, right); /* Process 2nd operand */
+      dst  = newVRegF(env);
+      /* dst doubles as 1st source and destination of the binop insn */
+      addInstr(env, s390_insn_move(size, dst, h1));
+      switch (op) {
+      case Iop_AddF32:
+      case Iop_AddF64:  bfpop = S390_BFP_ADD; break;
+      case Iop_SubF32:
+      case Iop_SubF64:  bfpop = S390_BFP_SUB; break;
+      case Iop_MulF32:
+      case Iop_MulF64:  bfpop = S390_BFP_MUL; break;
+      case Iop_DivF32:
+      case Iop_DivF64:  bfpop = S390_BFP_DIV; break;
+
+      default:
+         goto irreducible;
+      }
+
+      rounding_mode = decode_rounding_mode(expr->Iex.Triop.arg1);
+      addInstr(env, s390_insn_bfp_binop(size, bfpop, dst, op2, rounding_mode));
+      return dst;
+   }
+
+   /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      IROp    op   = expr->Iex.Binop.op;
+      IRExpr *left = expr->Iex.Binop.arg2;
+      HReg h1, dst;
+      s390_bfp_unop_t bfpop;
+      s390_round_t rounding_mode;
+      Int integer_operand;
+
+      integer_operand = 1;
+
+      switch (op) {
+      case Iop_SqrtF32:
+      case Iop_SqrtF64:
+         bfpop = S390_BFP_SQRT;
+         integer_operand = 0;
+         break;
+
+      case Iop_F64toF32:
+         bfpop = S390_BFP_F64_TO_F32;
+         integer_operand = 0;
+         break;
+
+      case Iop_I32StoF32: bfpop = S390_BFP_I32_TO_F32; break;
+      case Iop_I64StoF32: bfpop = S390_BFP_I64_TO_F32; break;
+      case Iop_I64StoF64: bfpop = S390_BFP_I64_TO_F64; break;
+      default:
+         goto irreducible;
+
+      /* Note: these cases come after the default label on purpose.
+         They handle F128 sources and return directly; they never fall
+         through to the code after the switch. */
+      case Iop_F128toF64:
+      case Iop_F128toF32: {
+         HReg op_hi, op_lo, f12, f13, f14, f15;
+
+         bfpop = op == Iop_F128toF32 ? S390_BFP_F128_TO_F32
+            : S390_BFP_F128_TO_F64;
+
+         rounding_mode = decode_rounding_mode(expr->Iex.Binop.arg1);
+
+         s390_isel_float128_expr(&op_hi, &op_lo, env, expr->Iex.Binop.arg2);
+
+         /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+         f12 = make_fpr(12);
+         f13 = make_fpr(13);
+         f14 = make_fpr(14);
+         f15 = make_fpr(15);
+
+         /* operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op_hi));
+         addInstr(env, s390_insn_move(8, f15, op_lo));
+
+         dst = newVRegF(env);
+         addInstr(env, s390_insn_bfp128_unop(16, bfpop, f12, f14, f13, f15,
+                                             rounding_mode));
+
+         /* Move result to virtual destination registers */
+         addInstr(env, s390_insn_move(8, dst, f12));
+         return dst;
+      }
+      }
+
+      /* Process operand */
+      if (integer_operand) {
+         h1 = s390_isel_int_expr(env, left);
+      } else {
+         h1 = s390_isel_float_expr(env, left);
+      }
+
+      dst = newVRegF(env);
+      rounding_mode = decode_rounding_mode(expr->Iex.Binop.arg1);
+      addInstr(env, s390_insn_bfp_unop(size, bfpop, dst, h1, rounding_mode));
+      return dst;
+   }
+
+   /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      IROp    op   = expr->Iex.Unop.op;
+      IRExpr *left = expr->Iex.Unop.arg;
+      s390_bfp_unop_t bfpop;
+      s390_round_t rounding_mode;
+      HReg h1, dst;
+
+      /* Extracting one half of an F128 value: select the pair and
+         return the requested half directly. */
+      if (op == Iop_F128HIto64 || op == Iop_F128to64) {
+         HReg dst_hi, dst_lo;
+
+         s390_isel_float128_expr(&dst_hi, &dst_lo, env, left);
+         return op == Iop_F128to64 ? dst_lo : dst_hi;
+      }
+
+      /* Reinterpretation is a plain register move; no conversion. */
+      if (op == Iop_ReinterpI64asF64) {
+         dst = newVRegF(env);
+         h1  = s390_isel_int_expr(env, left);     /* Process the operand */
+         addInstr(env, s390_insn_move(size, dst, h1));
+
+         return dst;
+      }
+
+      switch (op) {
+      case Iop_NegF32:
+      case Iop_NegF64:
+         /* Fold Neg(Abs(x)) into a single NABS operation */
+         if (left->tag == Iex_Unop &&
+             (left->Iex.Unop.op == Iop_AbsF32 || left->Iex.Unop.op == Iop_AbsF64))
+            bfpop = S390_BFP_NABS;
+         else
+            bfpop = S390_BFP_NEG;
+         break;
+
+      case Iop_AbsF32:
+      case Iop_AbsF64:    bfpop = S390_BFP_ABS;  break;
+      case Iop_I32StoF64: bfpop = S390_BFP_I32_TO_F64; break;
+      case Iop_F32toF64:  bfpop = S390_BFP_F32_TO_F64; break;
+      default:
+         goto irreducible;
+      }
+
+      /* Process operand */
+      if (op == Iop_I32StoF64)
+         h1 = s390_isel_int_expr(env, left);
+      else if (bfpop == S390_BFP_NABS)
+         /* Skip the Abs node; operate on its argument directly. */
+         h1 = s390_isel_float_expr(env, left->Iex.Unop.arg);
+      else
+         h1 = s390_isel_float_expr(env, left);
+
+      dst = newVRegF(env);
+      rounding_mode = S390_ROUND_CURRENT;  /* will not be used later on */
+      addInstr(env, s390_insn_bfp_unop(size, bfpop, dst, h1, rounding_mode));
+      return dst;
+   }
+
+   default:
+      goto irreducible;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_float_expr: cannot reduce tree");
+}
+
+
+/* Wrapper around s390_isel_float_expr_wrk: selects insns for EXPR and
+   returns the virtual FP register holding the result. */
+static HReg
+s390_isel_float_expr(ISelEnv *env, IRExpr *expr)
+{
+   HReg dst = s390_isel_float_expr_wrk(env, expr);
+
+   /* Sanity checks: result must be a virtual 64-bit FP register. */
+   vassert(hregClass(dst) == HRcFlt64);
+   vassert(hregIsVirtual(dst));
+
+   return dst;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Condition Code ---*/
+/*---------------------------------------------------------*/
+
+/* This function handles all operators that produce a 1-bit result */
+/* COND must have type Ity_I1. The return value is the s390 condition
+   code under which the generated comparison/test is "true". */
+static s390_cc_t
+s390_isel_cc(ISelEnv *env, IRExpr *cond)
+{
+   UChar size;
+
+   vassert(typeOfIRExpr(env->type_env, cond) == Ity_I1);
+
+   /* Constant: either 1 or 0 */
+   if (cond->tag == Iex_Const) {
+      vassert(cond->Iex.Const.con->tag == Ico_U1);
+      vassert(cond->Iex.Const.con->Ico.U1 == True
+              || cond->Iex.Const.con->Ico.U1 == False);
+
+      return cond->Iex.Const.con->Ico.U1 == True ? S390_CC_ALWAYS : S390_CC_NEVER;
+   }
+
+   /* Variable: values are 1 or 0 */
+   if (cond->tag == Iex_RdTmp) {
+      IRTemp tmp = cond->Iex.RdTmp.tmp;
+      HReg   reg = lookupIRTemp(env, tmp);
+
+      /* Load-and-test does not modify REG; so this is OK. */
+      if (typeOfIRTemp(env->type_env, tmp) == Ity_I1)
+         size = 4;
+      else
+         size = sizeofIRType(typeOfIRTemp(env->type_env, tmp));
+      addInstr(env, s390_insn_test(size, s390_opnd_reg(reg)));
+      return S390_CC_NE;
+   }
+
+   /* Unary operators */
+   if (cond->tag == Iex_Unop) {
+      IRExpr *arg = cond->Iex.Unop.arg;
+
+      switch (cond->Iex.Unop.op) {
+      case Iop_Not1:  /* Not1(cond) */
+         /* Generate code for EXPR, and negate the test condition */
+         return s390_cc_invert(s390_isel_cc(env, arg));
+
+         /* Iop_32/64to1  select the LSB from their operand */
+      case Iop_32to1:
+      case Iop_64to1: {
+         HReg dst = s390_isel_int_expr(env, arg);
+
+         size = sizeofIRType(typeOfIRExpr(env->type_env, arg));
+
+         addInstr(env, s390_insn_alu(size, S390_ALU_AND, dst, s390_opnd_imm(1)));
+         addInstr(env, s390_insn_test(size, s390_opnd_reg(dst)));
+         return S390_CC_NE;
+      }
+
+      case Iop_CmpNEZ8:
+      case Iop_CmpNEZ16: {
+         /* Zero-extend the sub-word operand to 32 bits, then test it. */
+         s390_opnd_RMI src;
+         s390_unop_t   op;
+         HReg dst;
+
+         op  = (cond->Iex.Unop.op == Iop_CmpNEZ8) ? S390_ZERO_EXTEND_8
+            : S390_ZERO_EXTEND_16;
+         dst = newVRegI(env);
+         src = s390_isel_int_expr_RMI(env, arg);
+         addInstr(env, s390_insn_unop(4, op, dst, src));
+         addInstr(env, s390_insn_test(4, s390_opnd_reg(dst)));
+         return S390_CC_NE;
+      }
+
+      case Iop_CmpNEZ32:
+      case Iop_CmpNEZ64: {
+         s390_opnd_RMI src;
+
+         src = s390_isel_int_expr_RMI(env, arg);
+         size = sizeofIRType(typeOfIRExpr(env->type_env, arg));
+         addInstr(env, s390_insn_test(size, src));
+         return S390_CC_NE;
+      }
+
+      default:
+         goto fail;
+      }
+   }
+
+   /* Binary operators */
+   if (cond->tag == Iex_Binop) {
+      IRExpr *arg1 = cond->Iex.Binop.arg1;
+      IRExpr *arg2 = cond->Iex.Binop.arg2;
+      HReg reg1, reg2;
+
+      size = sizeofIRType(typeOfIRExpr(env->type_env, arg1));
+
+      switch (cond->Iex.Binop.op) {
+         /* These declarations are at switch scope; they are shared by
+            the case labels below. */
+         s390_unop_t op;
+         s390_cc_t   result;
+
+      case Iop_CmpEQ8:
+      case Iop_CasCmpEQ8:
+         op     = S390_ZERO_EXTEND_8;
+         result = S390_CC_E;
+         goto do_compare_ze;
+
+      case Iop_CmpNE8:
+      case Iop_CasCmpNE8:
+         op     = S390_ZERO_EXTEND_8;
+         result = S390_CC_NE;
+         goto do_compare_ze;
+
+      case Iop_CmpEQ16:
+      case Iop_CasCmpEQ16:
+         op     = S390_ZERO_EXTEND_16;
+         result = S390_CC_E;
+         goto do_compare_ze;
+
+      case Iop_CmpNE16:
+      case Iop_CasCmpNE16:
+         op     = S390_ZERO_EXTEND_16;
+         result = S390_CC_NE;
+         goto do_compare_ze;
+
+      do_compare_ze: {
+            /* Zero-extend both sub-word operands to 32 bits, then compare */
+            s390_opnd_RMI op1, op2;
+
+            op1  = s390_isel_int_expr_RMI(env, arg1);
+            reg1 = newVRegI(env);
+            addInstr(env, s390_insn_unop(4, op, reg1, op1));
+
+            op2  = s390_isel_int_expr_RMI(env, arg2);
+            reg2 = newVRegI(env);
+            addInstr(env, s390_insn_unop(4, op, reg2, op2));  /* zero extend */
+
+            op2 = s390_opnd_reg(reg2);
+            addInstr(env, s390_insn_compare(4, reg1, op2, False));
+
+            return result;
+         }
+
+      case Iop_CmpEQ32:
+      case Iop_CmpEQ64:
+      case Iop_CasCmpEQ32:
+      case Iop_CasCmpEQ64:
+         result = S390_CC_E;
+         goto do_compare;
+
+      case Iop_CmpNE32:
+      case Iop_CmpNE64:
+      case Iop_CasCmpNE32:
+      case Iop_CasCmpNE64:
+         result = S390_CC_NE;
+         goto do_compare;
+
+      do_compare: {
+            HReg op1;
+            s390_opnd_RMI op2;
+
+            order_commutative_operands(arg1, arg2);
+
+            op1 = s390_isel_int_expr(env, arg1);
+            op2 = s390_isel_int_expr_RMI(env, arg2);
+
+            addInstr(env, s390_insn_compare(size, op1, op2, False));
+
+            return result;
+         }
+
+      case Iop_CmpLT32S:
+      case Iop_CmpLE32S:
+      case Iop_CmpLT64S:
+      case Iop_CmpLE64S: {
+         /* Signed comparison */
+         HReg op1;
+         s390_opnd_RMI op2;
+
+         op1 = s390_isel_int_expr(env, arg1);
+         op2 = s390_isel_int_expr_RMI(env, arg2);
+
+         addInstr(env, s390_insn_compare(size, op1, op2, True));
+
+         return (cond->Iex.Binop.op == Iop_CmpLT32S ||
+                 cond->Iex.Binop.op == Iop_CmpLT64S) ? S390_CC_L : S390_CC_LE;
+      }
+
+      case Iop_CmpLT32U:
+      case Iop_CmpLE32U:
+      case Iop_CmpLT64U:
+      case Iop_CmpLE64U: {
+         /* Unsigned comparison */
+         HReg op1;
+         s390_opnd_RMI op2;
+
+         op1 = s390_isel_int_expr(env, arg1);
+         op2 = s390_isel_int_expr_RMI(env, arg2);
+
+         addInstr(env, s390_insn_compare(size, op1, op2, False));
+
+         return (cond->Iex.Binop.op == Iop_CmpLT32U ||
+                 cond->Iex.Binop.op == Iop_CmpLT64U) ? S390_CC_L : S390_CC_LE;
+      }
+
+      default:
+         goto fail;
+      }
+   }
+
+ fail:
+   ppIRExpr(cond);
+   vpanic("s390_isel_cc: unexpected operator");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for a single IR statement STMT. Panics (via stmt_fail)
+   on statements that cannot be handled. */
+static void
+s390_isel_stmt(ISelEnv *env, IRStmt *stmt)
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n -- ");
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+
+   switch (stmt->tag) {
+
+      /* --------- STORE --------- */
+   case Ist_Store: {
+      IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+      s390_amode *am;
+      HReg src;
+
+      /* Only big-endian stores are supported on this host. */
+      if (stmt->Ist.Store.end != Iend_BE) goto stmt_fail;
+
+      am = s390_isel_amode(env, stmt->Ist.Store.addr);
+
+      switch (tyd) {
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+      case Ity_I64:
+         src = s390_isel_int_expr(env, stmt->Ist.Store.data);
+         break;
+
+      case Ity_F32:
+      case Ity_F64:
+         src = s390_isel_float_expr(env, stmt->Ist.Store.data);
+         break;
+
+      case Ity_F128:
+         /* Cannot occur. No such instruction */
+         vpanic("Ist_Store with F128 data");
+
+      default:
+         goto stmt_fail;
+      }
+
+      addInstr(env, s390_insn_store(sizeofIRType(tyd), am, src));
+      return;
+   }
+
+      /* --------- PUT --------- */
+   case Ist_Put: {
+      /* Write a value into the guest state at the given offset. */
+      IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+      HReg src;
+      s390_amode *am;
+
+      am = s390_amode_b12(stmt->Ist.Put.offset,
+                          s390_hreg_guest_state_pointer());
+      switch (tyd) {
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+      case Ity_I64:
+         src = s390_isel_int_expr(env, stmt->Ist.Put.data);
+         break;
+
+      case Ity_F32:
+      case Ity_F64:
+         src = s390_isel_float_expr(env, stmt->Ist.Put.data);
+         break;
+
+      case Ity_F128:
+         /* Does not occur. See function put_fpr_pair. */
+         vpanic("Ist_Put with F128 data");
+
+      default:
+         goto stmt_fail;
+      }
+
+      addInstr(env, s390_insn_store(sizeofIRType(tyd), am, src));
+      return;
+   }
+
+      /* --------- TMP --------- */
+   case Ist_WrTmp: {
+      /* Assign an expression's value to a temporary's register(s). */
+      IRTemp tmp = stmt->Ist.WrTmp.tmp;
+      IRType tyd = typeOfIRTemp(env->type_env, tmp);
+      HReg src, dst;
+
+      switch (tyd) {
+      case Ity_I128: {
+         /* 128-bit integer values live in a register pair. */
+         HReg dst_hi, dst_lo, res_hi, res_lo;
+
+         s390_isel_int128_expr(&res_hi, &res_lo, env, stmt->Ist.WrTmp.data);
+         lookupIRTemp128(&dst_hi, &dst_lo, env, tmp);
+
+         addInstr(env, s390_insn_move(8, dst_hi, res_hi));
+         addInstr(env, s390_insn_move(8, dst_lo, res_lo));
+         return;
+      }
+
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+      case Ity_I64:
+         src = s390_isel_int_expr(env, stmt->Ist.WrTmp.data);
+         dst = lookupIRTemp(env, tmp);
+         break;
+
+      case Ity_I1: {
+         /* Materialize the condition code as a boolean value. */
+         s390_cc_t cond = s390_isel_cc(env, stmt->Ist.WrTmp.data);
+         dst = lookupIRTemp(env, tmp);
+         addInstr(env, s390_insn_cc2bool(dst, cond));
+         return;
+      }
+
+      case Ity_F32:
+      case Ity_F64:
+         src = s390_isel_float_expr(env, stmt->Ist.WrTmp.data);
+         dst = lookupIRTemp(env, tmp);
+         break;
+
+      case Ity_F128: {
+         /* 128-bit float values live in an FP register pair. */
+         HReg dst_hi, dst_lo, res_hi, res_lo;
+
+         s390_isel_float128_expr(&res_hi, &res_lo, env, stmt->Ist.WrTmp.data);
+         lookupIRTemp128(&dst_hi, &dst_lo, env, tmp);
+
+         addInstr(env, s390_insn_move(8, dst_hi, res_hi));
+         addInstr(env, s390_insn_move(8, dst_lo, res_lo));
+         return;
+      }
+
+      default:
+         goto stmt_fail;
+      }
+
+      addInstr(env, s390_insn_move(sizeofIRType(tyd), dst, src));
+      return;
+   }
+
+      /* --------- Call to DIRTY helper --------- */
+   case Ist_Dirty: {
+      IRType   retty;
+      IRDirty* d = stmt->Ist.Dirty.details;
+      Bool     passBBP;
+
+      if (d->nFxState == 0)
+         vassert(!d->needsBBP);
+
+      passBBP = toBool(d->nFxState > 0 && d->needsBBP);
+
+      doHelperCall(env, passBBP, d->guard, d->cee, d->args);
+
+      /* Now figure out what to do with the returned value, if any. */
+      if (d->tmp == IRTemp_INVALID)
+         /* No return value. Nothing to do. */
+         return;
+
+      retty = typeOfIRTemp(env->type_env, d->tmp);
+      if (retty == Ity_I64 || retty == Ity_I32
+          || retty == Ity_I16 || retty == Ity_I8) {
+         /* Move the returned value into the return register */
+         HReg dst = lookupIRTemp(env, d->tmp);
+         addInstr(env, s390_insn_move(sizeofIRType(retty), dst,
+                                      mkHReg(S390_REGNO_RETURN_VALUE,
+                                             HRcInt64, False)));
+         return;
+      }
+      /* Other return types are not handled; fall through to stmt_fail */
+      break;
+   }
+
+   case Ist_CAS:
+      if (stmt->Ist.CAS.details->oldHi == IRTemp_INVALID) {
+         /* Single-word compare-and-swap. */
+         IRCAS *cas = stmt->Ist.CAS.details;
+         s390_amode *op2 = s390_isel_amode(env, cas->addr);
+         HReg op3 = s390_isel_int_expr(env, cas->dataLo);  /* new value */
+         HReg op1 = s390_isel_int_expr(env, cas->expdLo);  /* expected value */
+         HReg old = lookupIRTemp(env, cas->oldLo);
+
+         if (typeOfIRTemp(env->type_env, cas->oldLo) == Ity_I32) {
+            addInstr(env, s390_insn_cas(4, op1, op2, op3, old));
+         } else {
+            addInstr(env, s390_insn_cas(8, op1, op2, op3, old));
+         }
+         return;
+      } else {
+         vpanic("compare double and swap not implemented\n");
+      }
+      break;
+
+      /* --------- EXIT --------- */
+   case Ist_Exit: {
+      /* Conditional branch out of the superblock. */
+      s390_opnd_RMI dst;
+      s390_cc_t cond;
+      IRConstTag tag = stmt->Ist.Exit.dst->tag;
+
+      if (tag != Ico_U64)
+         vpanic("s390_isel_stmt: Ist_Exit: dst is not a 64-bit value");
+
+      dst  = s390_isel_int_expr_RMI(env, IRExpr_Const(stmt->Ist.Exit.dst));
+      cond = s390_isel_cc(env, stmt->Ist.Exit.guard);
+      addInstr(env, s390_insn_branch(stmt->Ist.Exit.jk, cond, dst));
+      return;
+   }
+
+      /* --------- MEM FENCE --------- */
+   case Ist_MBE:  /* fixs390 later */
+      break;
+
+      /* --------- Miscellaneous --------- */
+
+   case Ist_PutI:    /* Not needed */
+   case Ist_IMark:   /* Doesn't generate any executable code */
+   case Ist_NoOp:    /* Doesn't generate any executable code */
+   case Ist_AbiHint: /* Meaningless in IR */
+      return;
+
+   default:
+      break;
+   }
+
+ stmt_fail:
+   ppIRStmt(stmt);
+   vpanic("s390_isel_stmt");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts) ---*/
+/*---------------------------------------------------------*/
+
+/* Generate code for the superblock's final jump: branch unconditionally
+   to NEXT with jump kind JK. */
+static void
+iselNext(ISelEnv *env, IRExpr *next, IRJumpKind jk)
+{
+   s390_opnd_RMI dst;
+
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n-- goto {");
+      ppIRJumpKind(jk);
+      vex_printf("} ");
+      ppIRExpr(next);
+      vex_printf("\n");
+   }
+
+   dst = s390_isel_int_expr_RMI(env, next);
+   addInstr(env, s390_insn_branch(jk, S390_CC_ALWAYS, dst));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire SB to s390 code. */
+
+/* Entry point of the instruction selector. ARCHINFO_HOST supplies the
+   host's hwcaps; VBI is not referenced by this implementation. */
+HInstrArray *
+s390_isel_sb(IRSB *bb, VexArch arch_host, VexArchInfo *archinfo_host,
+             VexAbiInfo *vbi)
+{
+   UInt     i, j;
+   HReg     hreg, hregHI;
+   ISelEnv *env;
+   UInt     hwcaps_host = archinfo_host->hwcaps;
+
+   /* Do some sanity checks */
+   vassert((hwcaps_host & ~(VEX_HWCAPS_S390X_ALL)) == 0);
+
+   /* Make up an initial environment to use. */
+   env = LibVEX_Alloc(sizeof(ISelEnv));
+   env->vreg_ctr = 0;
+
+   /* Are we being s390 or s390x? */
+   env->mode64 = arch_host == VexArchS390X;
+
+   /* Set up output code array. */
+   env->code = newHInstrArray();
+
+   /* Copy BB's type env. */
+   env->type_env = bb->tyenv;
+
+   /* Make up an IRTemp -> virtual HReg mapping. This doesn't
+      change as we go along. For some reason types_used has Int type -- but
+      it should be unsigned. Internally we use an unsigned type; so we
+      assert it here. */
+   vassert(bb->tyenv->types_used >= 0);
+
+   env->n_vregmap = bb->tyenv->types_used;
+   env->vregmap   = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+
+   /* and finally ... */
+   env->hwcaps    = hwcaps_host;
+
+   /* For each IR temporary, allocate a suitably-kinded virtual
+      register. 128-bit temporaries get a pair of registers; the
+      second one is recorded in vregmapHI. */
+   j = 0;
+   for (i = 0; i < env->n_vregmap; i++) {
+      hregHI = hreg = INVALID_HREG;
+      switch (bb->tyenv->types[i]) {
+      case Ity_I1:
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+         hreg = mkHReg(j++, HRcInt64, True);
+         break;
+
+      case Ity_I64:
+         hreg = mkHReg(j++, HRcInt64, True);
+         break;
+
+      case Ity_I128:
+         hreg   = mkHReg(j++, HRcInt64, True);
+         hregHI = mkHReg(j++, HRcInt64, True);
+         break;
+
+      case Ity_F32:
+      case Ity_F64:
+         hreg = mkHReg(j++, HRcFlt64, True);
+         break;
+
+      case Ity_F128:
+         hreg   = mkHReg(j++, HRcFlt64, True);
+         hregHI = mkHReg(j++, HRcFlt64, True);
+         break;
+
+      case Ity_V128: /* fall through */
+      default:
+         ppIRType(bb->tyenv->types[i]);
+         vpanic("s390_isel_sb: IRTemp type");
+      }
+
+      env->vregmap[i]   = hreg;
+      env->vregmapHI[i] = hregHI;
+   }
+   env->vreg_ctr = j;
+
+   /* Ok, finally we can iterate over the statements. */
+   for (i = 0; i < bb->stmts_used; i++)
+      if (bb->stmts[i])
+         s390_isel_stmt(env, bb->stmts[i]);
+
+   iselNext(env, bb->next, bb->jumpkind);
+
+   /* Record the number of vregs we used. */
+   env->code->n_vregs = env->vreg_ctr;
+
+   return env->code;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_isel.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_isel.h
+++ valgrind/VEX/priv/host_s390_isel.h
@@ -0,0 +1,47 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_isel.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#ifndef __VEX_HOST_S390_ISEL_H
+#define __VEX_HOST_S390_ISEL_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h" /* VexAbiInfo */
+#include "main_util.h" /* needed for host_generic_regs.h */
+#include "host_generic_regs.h" /* HInstrArray */
+
+HInstrArray *s390_isel_sb(IRSB *, VexArch, VexArchInfo *, VexAbiInfo *);
+
+#endif /* ndef __VEX_HOST_S390_ISEL_H */
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_isel.h ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/priv/host_s390_wrapper.c
+++ valgrind/VEX/priv/host_s390_wrapper.c
@@ -0,0 +1,413 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin host_s390_wrapper.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+/* This file contains wrapper functions for instructions that depend on
+ certain hardware facilities being installed. If the facility is not
+ installed an equivalent instruction sequence will be issued.
+ The general register "r0" is available to those instruction sequences. */
+
+#include "libvex_basictypes.h"
+
+#include "host_s390_insn.h" /* s390_host_has_eimm */
+#include "host_s390_emit.h"
+
+
+/* For both 32-bit and 64-bit clients we can use the instructions that
+ were available on z900. See binutils/opcodes/s390-opc.txt
+
+ Note, that entering a wrapper for a z/arch instruction does not
+ imply that the client is a 64-bit client. It only means that this
+ instruction is a good match for the expression at hand.
+*/
+
+/* Provide a symbolic name for register "r0" */
+#undef r0
+#define r0 0
+
+/* Split up a 20-bit displacement into its high and low piece
+ suitable for passing as function arguments */
+#define DISP20(d) ((d) & 0xFFF), (((d) >> 12) & 0xFF)
+
+
+/*------------------------------------------------------------*/
+/*--- Helper functions ---*/
+/*------------------------------------------------------------*/
+
+/* Load a 32-bit immediate VAL into register REG. */
+/* NOTE(review): IILH/IILL insert into the low word only; bits [0:31] of
+   REG are presumably left unchanged -- confirm against the emitter. */
+static UChar *
+s390_emit_load_32imm(UChar *p, UChar reg, UInt val)
+{
+   /* val[0:15]  --> (val >> 16) & 0xFFFF
+      val[16:31] --> val & 0xFFFF
+   */
+   p = s390_emit_IILH(p, reg, (val >> 16) & 0xFFFF);
+   return s390_emit_IILL(p, reg, val & 0xFFFF);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Wrapper functions ---*/
+/*------------------------------------------------------------*/
+
+/* r1[32:63] = i2 */
+UChar *
+s390_emit_IILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_IILF(p, r1, i2);
+   }
+
+   /* Fallback: synthesize the 32-bit load from two halfword inserts */
+   return s390_emit_load_32imm(p, r1, i2);
+}
+
+
+/* r1[32:63],r1+1[32:63] = r1+1[32:63] * memory[op2addr][32:63] */
+UChar *
+s390_emit_MFYw(UChar *p, UChar r1, UChar x, UChar b, UShort dl, UChar dh)
+{
+   if (s390_host_has_gie) {
+      return s390_emit_MFY(p, r1, x, b, dl, dh);
+   }
+
+   /* Load from memory into R0, then MULTIPLY with R1 (clobbers r0) */
+   p = s390_emit_LY(p, r0, x, b, dl, dh);
+   return s390_emit_MR(p, r1, r0);
+}
+
+
+/* r1[32:63] = r1[32:63] * i2 */
+UChar *
+s390_emit_MSFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_gie) {
+      return s390_emit_MSFI(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then MULTIPLY R0 with R1 (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_MSR(p, r1, r0);
+}
+
+
+/* r1[32:63] = r1[32:63] & i2 */
+UChar *
+s390_emit_NILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_NILF(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then AND R0 with R1 (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_NR(p, r1, r0);
+}
+
+
+/* r1[32:63] = r1[32:63] | i2 */
+UChar *
+s390_emit_OILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_OILF(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then OR R0 with R1 (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_OR(p, r1, r0);
+}
+
+
+/* r1[32:63] = r1[32:63] ^ i2 */
+UChar *
+s390_emit_XILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_XILF(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then XOR R0 with R1 (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_XR(p, r1, r0);
+}
+
+
+/* r1[32:63] = sign_extend(r2[56:63]) */
+UChar *
+s390_emit_LBRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LBR(p, r1, r2);
+   }
+
+   /* Fallback: shift left, then shift right arithmetically to sign-extend */
+   p = s390_emit_LR(p, r1, r2);              /* r1 = r2 */
+   p = s390_emit_SLL(p, r1, r0, r0, 24);     /* r1 = r1 << 24 */
+   return s390_emit_SRA(p, r1, r0, r0, 24);  /* r1 = r1 >>a 24 */
+}
+
+
+/* r1[0:63] = sign_extend(r2[56:63]) */
+UChar *
+s390_emit_LGBRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LGBR(p, r1, r2);
+   }
+
+   /* Fallback: 64-bit shift left / arithmetic shift right */
+   p = s390_emit_LR(p, r1, r2);                        /* r1 = r2 */
+   p = s390_emit_SLLG(p, r1, r1, r0, DISP20(56));      /* r1 = r1 << 56 */
+   return s390_emit_SRAG(p, r1, r1, r0, DISP20(56));   /* r1 = r1 >>a 56 */
+}
+
+
+/* r1[32:63] = sign_extend(r2[48:63]) */
+UChar *
+s390_emit_LHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LHR(p, r1, r2);
+   }
+
+   /* Fallback: shift left, then shift right arithmetically to sign-extend */
+   p = s390_emit_LR(p, r1, r2);              /* r1 = r2 */
+   p = s390_emit_SLL(p, r1, r0, r0, 16);     /* r1 = r1 << 16 */
+   return s390_emit_SRA(p, r1, r0, r0, 16);  /* r1 = r1 >>a 16 */
+}
+
+
+/* r1[0:63] = sign_extend(r2[48:63]) */
+UChar *
+s390_emit_LGHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LGHR(p, r1, r2);
+   }
+
+   /* Fallback: 64-bit shift left / arithmetic shift right */
+   p = s390_emit_LR(p, r1, r2);                        /* r1 = r2 */
+   p = s390_emit_SLLG(p, r1, r1, r0, DISP20(48));      /* r1 = r1 << 48 */
+   return s390_emit_SRAG(p, r1, r1, r0, DISP20(48));   /* r1 = r1 >>a 48 */
+}
+
+
+/* r1[0:63] = sign_extend(i2) */
+UChar *
+s390_emit_LGFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LGFI(p, r1, i2);
+   }
+
+   /* Load I2 into R0, then sign-extend it into R1 (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_LGFR(p, r1, r0);
+}
+
+
+/* r1[32:63] = zero_extend(r2[56:63]) */
+UChar *
+s390_emit_LLCRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLCR(p, r1, r2);
+   }
+
+   /* Fallback: copy, then mask with 0xFF (clobbers r0) */
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LHI(p, r0, 0xFF);
+   return s390_emit_NR(p, r1, r0);
+}
+
+
+/* r1[0:63] = zero_extend(r2[56:63]) */
+UChar *
+s390_emit_LLGCRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLGCR(p, r1, r2);
+   }
+
+   /* Fallback: copy, then 64-bit mask with 0xFF (clobbers r0) */
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LLILL(p, r0, 0xFF);
+   return s390_emit_NGR(p, r1, r0);
+}
+
+
+/* r1[32:63] = zero_extend(r2[48:63]) */
+UChar *
+s390_emit_LLHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLHR(p, r1, r2);
+   }
+
+   /* Fallback: copy, then mask with 0xFFFF (clobbers r0) */
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LLILL(p, r0, 0xFFFF);
+   return s390_emit_NR(p, r1, r0);
+}
+
+
+/* r1[0:63] = zero_extend(r2[48:63]) */
+UChar *
+s390_emit_LLGHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLGHR(p, r1, r2);
+   }
+
+   /* Fallback: copy, then 64-bit mask with 0xFFFF (clobbers r0) */
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LLILL(p, r0, 0xFFFF);
+   return s390_emit_NGR(p, r1, r0);
+}
+
+
+/* r1[32:63] = zero_extend(mem[op2addr][0:7]) */
+UChar *
+s390_emit_LLCw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLC(p, r1, x2, b2, dl, dh);
+   }
+
+   /* Fallback: insert the byte, then mask to 8 bits (clobbers r0).
+      IC suffices for a 12-bit displacement; ICY is needed otherwise. */
+   if (dh == 0) {
+      p = s390_emit_IC(p, r1, x2, b2, dl);
+   } else {
+      p = s390_emit_ICY(p, r1, x2, b2, dl, dh);
+   }
+   p = s390_emit_LLILL(p, r0, 0xFF);
+   return s390_emit_NR(p, r1, r0);
+}
+
+
+/* r1[32:63] = zero_extend(mem[op2addr][0:15]) */
+UChar *
+s390_emit_LLHw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLH(p, r1, x2, b2, dl, dh);
+   }
+
+   /* NOTE(review): LLGH already zero-extends the halfword into all of r1,
+      which makes the subsequent 0xFFFF mask look redundant -- confirm. */
+   p = s390_emit_LLGH(p, r1, x2, b2, dl, dh);
+   p = s390_emit_LLILL(p, r0, 0xFFFF);
+   return s390_emit_NR(p, r1, r0);
+}
+
+
+/* r1[0:63] = zero_extend(i2) */
+UChar *
+s390_emit_LLILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLILF(p, r1, i2);
+   }
+
+   /* Fallback: load high halfword (zeroing the rest), then OR in the low */
+   p = s390_emit_LLILH(p, r1, (i2 >> 16) & 0xFFFF);  /* i2[0:15] */
+   return s390_emit_OILL(p, r1, i2 & 0xFFFF);
+}
+
+
+/* r1[32:63] = r1[32:63] + i2 */
+UChar *
+s390_emit_AFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_AFI(p, r1, i2);
+   }
+   /* Load 32 bit immediate to R0 then add (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_AR(p, r1, r0);
+}
+
+
+/* r1[32:63] = r1[32:63] - i2 */
+UChar *
+s390_emit_SLFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_SLFI(p, r1, i2);
+   }
+
+   /* Load 32 bit immediate to R0 then subtract (clobbers r0).
+      NOTE(review): SR is a signed subtract whereas SLFI is a *logical*
+      subtract; the result value is identical but the condition code may
+      differ -- confirm that callers do not depend on the CC here. */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_SR(p, r1, r0);
+}
+
+
+/* Load-and-test (32 bit): r1 = mem[op2addr]; condition code is set */
+UChar *
+s390_emit_LTw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LT(p, r1, x2, b2, dl, dh);
+   }
+   /* Load 32 bit from memory to R0 then compare (clobbers r0).
+      L suffices for a 12-bit displacement; LY is needed otherwise. */
+   if (dh == 0) {
+      p = s390_emit_L(p, r0, x2, b2, dl);
+   } else {
+      p = s390_emit_LY(p, r0, x2, b2, dl, dh);
+   }
+   return s390_emit_LTR(p, r1, r0);
+}
+
+
+/* Load-and-test (64 bit): r1 = mem[op2addr]; condition code is set */
+UChar *
+s390_emit_LTGw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LTG(p, r1, x2, b2, dl, dh);
+   }
+   /* Load 64 bit from memory to R0 then compare (clobbers r0) */
+   p = s390_emit_LG(p, r0, x2, b2, dl, dh);
+   return s390_emit_LTGR(p, r1, r0);
+}
+
+
+/* Compare r1[32:63] with i2 */
+UChar *
+s390_emit_CFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_CFI(p, r1, i2);
+   }
+   /* Load 32 bit immediate to R0 then compare (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_CR(p, r1, r0);
+}
+
+
+/* Compare logical r1[32:63] with i2 */
+UChar *
+s390_emit_CLFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_CLFI(p, r1, i2);
+   }
+   /* Load 32 bit immediate to R0 then compare (clobbers r0) */
+   p = s390_emit_load_32imm(p, r0, i2);
+   return s390_emit_CLR(p, r1, r0);
+}
+
+/*---------------------------------------------------------------*/
+/*--- end host_s390_wrapper.c ---*/
+/*---------------------------------------------------------------*/
--- valgrind/VEX/pub/libvex_guest_s390x.h
+++ valgrind/VEX/pub/libvex_guest_s390x.h
@@ -0,0 +1,178 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin libvex_guest_s390x.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_S390X_H
+#define __LIBVEX_PUB_GUEST_S390X_H
+
+#include "libvex_basictypes.h"
+#include "libvex_emwarn.h"
+
+/*------------------------------------------------------------*/
+/*--- Vex's representation of the s390 CPU state. ---*/
+/*------------------------------------------------------------*/
+
+typedef struct {
+
+/*------------------------------------------------------------*/
+/*--- ar registers ---*/
+/*------------------------------------------------------------*/
+
+ /* 0 */ UInt guest_a0;
+ /* 4 */ UInt guest_a1;
+ /* 8 */ UInt guest_a2;
+ /* 12 */ UInt guest_a3;
+ /* 16 */ UInt guest_a4;
+ /* 20 */ UInt guest_a5;
+ /* 24 */ UInt guest_a6;
+ /* 28 */ UInt guest_a7;
+ /* 32 */ UInt guest_a8;
+ /* 36 */ UInt guest_a9;
+ /* 40 */ UInt guest_a10;
+ /* 44 */ UInt guest_a11;
+ /* 48 */ UInt guest_a12;
+ /* 52 */ UInt guest_a13;
+ /* 56 */ UInt guest_a14;
+ /* 60 */ UInt guest_a15;
+
+/*------------------------------------------------------------*/
+/*--- fpr registers ---*/
+/*------------------------------------------------------------*/
+
+ /* 64 */ ULong guest_f0;
+ /* 72 */ ULong guest_f1;
+ /* 80 */ ULong guest_f2;
+ /* 88 */ ULong guest_f3;
+ /* 96 */ ULong guest_f4;
+ /* 104 */ ULong guest_f5;
+ /* 112 */ ULong guest_f6;
+ /* 120 */ ULong guest_f7;
+ /* 128 */ ULong guest_f8;
+ /* 136 */ ULong guest_f9;
+ /* 144 */ ULong guest_f10;
+ /* 152 */ ULong guest_f11;
+ /* 160 */ ULong guest_f12;
+ /* 168 */ ULong guest_f13;
+ /* 176 */ ULong guest_f14;
+ /* 184 */ ULong guest_f15;
+
+/*------------------------------------------------------------*/
+/*--- gpr registers ---*/
+/*------------------------------------------------------------*/
+
+ /* 192 */ ULong guest_r0;
+ /* 200 */ ULong guest_r1;
+ /* 208 */ ULong guest_r2;
+ /* 216 */ ULong guest_r3;
+ /* 224 */ ULong guest_r4;
+ /* 232 */ ULong guest_r5;
+ /* 240 */ ULong guest_r6;
+ /* 248 */ ULong guest_r7;
+ /* 256 */ ULong guest_r8;
+ /* 264 */ ULong guest_r9;
+ /* 272 */ ULong guest_r10;
+ /* 280 */ ULong guest_r11;
+ /* 288 */ ULong guest_r12;
+ /* 296 */ ULong guest_r13;
+ /* 304 */ ULong guest_r14;
+ /* 312 */ ULong guest_r15;
+
+/*------------------------------------------------------------*/
+/*--- S390 miscellaneous registers ---*/
+/*------------------------------------------------------------*/
+
+ /* 320 */ ULong guest_counter;
+ /* 328 */ UInt guest_fpc;
+ /* 4-byte hole to enforce alignment requirements */
+ /* 336 */ ULong guest_IA;
+
+/*------------------------------------------------------------*/
+/*--- S390 pseudo registers ---*/
+/*------------------------------------------------------------*/
+
+ /* 344 */ ULong guest_SYSNO;
+
+/*------------------------------------------------------------*/
+/*--- 4-word thunk used to calculate the condition code ---*/
+/*------------------------------------------------------------*/
+
+ /* 352 */ ULong guest_CC_OP;
+ /* 360 */ ULong guest_CC_DEP1;
+ /* 368 */ ULong guest_CC_DEP2;
+ /* 376 */ ULong guest_CC_NDEP;
+
+/*------------------------------------------------------------*/
+/*--- Pseudo registers. Required by all architectures ---*/
+/*------------------------------------------------------------*/
+
+ /* See comments at bottom of libvex.h */
+ /* 384 */ ULong guest_NRADDR;
+ /* 392 */ ULong guest_TISTART;
+ /* 400 */ ULong guest_TILEN;
+
+ /* Used when backing up to restart a syscall that has
+ been interrupted by a signal. See also comment in
+ libvex_ir.h */
+ /* 408 */ ULong guest_IP_AT_SYSCALL;
+
+ /* Emulation warnings; see comments in libvex_emwarn.h */
+ /* 416 */ UInt guest_EMWARN;
+
+/*------------------------------------------------------------*/
+/*--- Force alignment to 16 bytes ---*/
+/*------------------------------------------------------------*/
+ /* 420 */ UChar padding[12];
+
+ /* 432 */ /* This is the size of the guest state */
+} VexGuestS390XState;
+
+/*------------------------------------------------------------*/
+/*--- Required alignment for s390x target ---*/
+/*------------------------------------------------------------*/
+
+#define VexGuestS390XStateAlignment 16
+
+/*------------------------------------------------------------*/
+/*--- Function prototypes ---*/
+/*------------------------------------------------------------*/
+
+void LibVEX_GuestS390X_initialise(VexGuestS390XState *);
+
+/*------------------------------------------------------------*/
+/*--- Dedicated registers ---*/
+/*------------------------------------------------------------*/
+
+#define guest_LR guest_r14 /* Link register */
+#define guest_SP guest_r15 /* Stack pointer */
+#define guest_FP guest_r11 /* Frame pointer */
+
+/*---------------------------------------------------------------*/
+/*--- end libvex_guest_s390x.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __LIBVEX_PUB_GUEST_S390X_H */
--- valgrind/VEX/pub/libvex_s390x.h
+++ valgrind/VEX/pub/libvex_s390x.h
@@ -0,0 +1,59 @@
+#ifndef __LIBVEX_PUB_S390X_H
+#define __LIBVEX_PUB_S390X_H
+
+/* This file includes definitions for s390.
+
+ It must be suitable for inclusion in assembler source files. */
+
+
+/*--------------------------------------------------------------*/
+/*--- Dedicated registers ---*/
+/*--------------------------------------------------------------*/
+
+#define S390_REGNO_RETURN_VALUE 2
+#define S390_REGNO_DISPATCH_CTR 12 /* Holds VG_(dispatch_ctr) */
+#define S390_REGNO_GUEST_STATE_POINTER 13
+#define S390_REGNO_LINK_REGISTER 14
+#define S390_REGNO_STACK_POINTER 15
+
+
+/*--------------------------------------------------------------*/
+/*--- Offsets in the stack frame allocated by the dispatcher ---*/
+/*--------------------------------------------------------------*/
+
+/* Where client's FPC register is saved. */
+#define S390_OFFSET_SAVED_FPC_C 160+88
+
+/* Where valgrind's FPC register is saved. */
+#define S390_OFFSET_SAVED_FPC_V 160+80
+
+/* Where client code will save the link register before calling a helper. */
+#define S390_OFFSET_SAVED_LR 160+72
+
+/* Location of saved guest state pointer */
+#define S390_OFFSET_SAVED_GSP 160+64
+
+/* Size of frame allocated by VG_(run_innerloop)
+ Need size for
+ 8 FPRs
+ + 2 GPRs (SAVED_GSP and SAVED_LR)
+ + 2 FPCs (SAVED_FPC_C and SAVED_FPC_V).
+
+ Additionally, we need a standard frame for helper functions being called
+ from client code. (See figure 1-16 in zSeries ABI) */
+#define S390_INNERLOOP_FRAME_SIZE ((8+2+2)*8 + 160)
+
+
+/*--------------------------------------------------------------*/
+/*--- Miscellaneous ---*/
+/*--------------------------------------------------------------*/
+
+/* Number of arguments that can be passed in registers */
+#define S390_NUM_GPRPARMS 5
+
+
+/*--------------------------------------------------------------------*/
+/*--- libvex_s390x.h ---*/
+/*--------------------------------------------------------------------*/
+
+#endif /* __LIBVEX_PUB_S390X_H */
--- valgrind/cachegrind/cg-s390x.c
+++ valgrind/cachegrind/cg-s390x.c
@@ -0,0 +1,73 @@
+
+/*--------------------------------------------------------------------*/
+/*--- s390x-specific definitions. cg-s390x.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Cachegrind, a Valgrind tool for cache
+ profiling programs.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Christian Borntraeger */
+
+#if defined(VGA_s390x)
+
+#include "pub_tool_basics.h"
+#include "pub_tool_libcbase.h"
+#include "pub_tool_libcassert.h"
+#include "pub_tool_libcprint.h"
+
+#include "cg_arch.h"
+
+void VG_(configure_caches)(cache_t* I1c, cache_t* D1c, cache_t* L2c,
+ Bool all_caches_clo_defined)
+{
+ // Set caches to z10 default.
+ // See IBM Journal of Research and Development
+ // Issue Date: Jan. 2009
+ // Volume: 53 Issue:1
+ // fixs390: have a table for all available models and check /proc/cpuinfo
+ *I1c = (cache_t) { 65536, 4, 256 };
+ *D1c = (cache_t) { 131072, 8, 256 };
+ *L2c = (cache_t) { 3145728, 12, 256 };
+
+ // Warn if config not completely specified from cmd line. Note that
+ // this message is slightly different from the one we give on x86/AMD64
+ // when auto-detection fails; this lets us filter out this one (which is
+ // not important) in the regression test suite without filtering the
+ // x86/AMD64 one (which we want to see if it ever occurs in the
+ // regression test suite).
+ //
+ // If you change this message, please update
+ // cachegrind/tests/filter_stderr!
+ //
+ if (!all_caches_clo_defined) {
+ VG_(dmsg)("Warning: Cannot auto-detect cache config on s390x, using one "
+ "or more defaults \n");
+ }
+}
+
+#endif
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/coregrind/m_dispatch/dispatch-s390x-linux.S
+++ valgrind/coregrind/m_dispatch/dispatch-s390x-linux.S
@@ -0,0 +1,401 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The core dispatch loop, for jumping to a code address. ---*/
+/*--- dispatch-s390x-linux.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm and Christian Borntraeger */
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_dispatch_asm.h"
+#include "pub_core_transtab_asm.h"
+#include "libvex_guest_offsets.h"
+#include "libvex_s390x.h"
+
+#if defined(VGA_s390x)
+
+/*------------------------------------------------------------*/
+/*--- ---*/
+/*--- The dispatch loop. VG_(run_innerloop) is used to ---*/
+/*--- run all translations except no-redir ones. ---*/
+/*--- ---*/
+/*------------------------------------------------------------*/
+
+/* Convenience definitions for readability */
+#undef SP
+#define SP S390_REGNO_STACK_POINTER
+
+#undef LR
+#define LR S390_REGNO_LINK_REGISTER
+
+/* Location of valgrind's saved FPC register */
+#define S390_LOC_SAVED_FPC_V S390_OFFSET_SAVED_FPC_V(SP)
+
+/* Location of saved guest state pointer */
+#define S390_LOC_SAVED_GSP S390_OFFSET_SAVED_GSP(SP)
+
+/*----------------------------------------------------*/
+/*--- Preamble (set everything up) ---*/
+/*----------------------------------------------------*/
+
+/* signature:
+UWord VG_(run_innerloop) ( void* guest_state, UWord do_profiling );
+*/
+
+.text
+.align 4
+.globl VG_(run_innerloop)
+VG_(run_innerloop):
+ /* r2 holds address of guest_state */
+ /* r3 holds do_profiling (a flag) */
+
+ /* Save gprs ABI: r6...r13 and r15 */
+ stmg %r6,%r15,48(SP)
+
+ /* New stack frame */
+ aghi SP,-S390_INNERLOOP_FRAME_SIZE
+
+ /* Save fprs: ABI: f8...f15 */
+ std %f8,160+0(SP)
+ std %f9,160+8(SP)
+ std %f10,160+16(SP)
+ std %f11,160+24(SP)
+ std %f12,160+32(SP)
+ std %f13,160+40(SP)
+ std %f14,160+48(SP)
+ std %f15,160+56(SP)
+
+ /* Load address of guest state into guest state register (r13) */
+ lgr %r13,%r2
+
+ /* Store address of guest state pointer on stack.
+ It will be needed later because upon return from a VEX translation
+ r13 may contain a special value. So the old value will be used to
+ determine whether r13 contains a special value. */
+ stg %r13,S390_LOC_SAVED_GSP
+
+ /* Save valgrind's FPC on stack so run_innerloop_exit can restore
+ it later. */
+ stfpc S390_LOC_SAVED_FPC_V
+
+ /* Load the FPC the way the client code wants it. I.e. pull the
+ value from the guest state. */
+ lfpc OFFSET_s390x_fpc(%r13)
+
+ /* Get the IA from the guest state */
+ lg %r2,OFFSET_s390x_IA(%r13)
+
+ /* Get VG_(dispatch_ctr) -- a 32-bit value -- and store it in a reg */
+ larl %r6,VG_(dispatch_ctr)
+ l S390_REGNO_DISPATCH_CTR,0(%r6)
+
+ /* Fall into main loop (the right one) */
+
+ /* r3 = 1 --> do_profiling. We may trash r3 later on. That's OK,
+ because it's a volatile register (does not need to be preserved). */
+ ltgr %r3,%r3
+ je run_innerloop__dispatch_unprofiled
+ j run_innerloop__dispatch_profiled
+
+/*----------------------------------------------------*/
+/*--- NO-PROFILING (standard) dispatcher ---*/
+/*----------------------------------------------------*/
+
+run_innerloop__dispatch_unprofiled:
+ /* This is the story:
+
+ r2 = IA = next guest address
+ r12 = VG_(dispatch_ctr)
+ r13 = guest state pointer or (upon return from guest code) some
+ special value
+ r15 = stack pointer (as usual)
+ */
+
+ /* Has the guest state pointer been messed with? If yes, exit. */
+ cg %r13,S390_LOC_SAVED_GSP /* r13 = actual guest state pointer */
+ larl %r8, VG_(tt_fast)
+ jne gsp_changed
+
+ /* Save the jump address in the guest state */
+ stg %r2,OFFSET_s390x_IA(%r13)
+
+
+ /* Try a fast lookup in the translation cache */
+ lgr %r7, %r2 /* next guest addr */
+
+ /* Compute offset (not index) into VG_(tt_fast):
+
+ offset = VG_TT_FAST_HASH(addr) * sizeof(FastCacheEntry)
+
+ with VG_TT_FAST_HASH(addr) == (addr >> 1) & VG_TT_FAST_MASK
+ and sizeof(FastCacheEntry) == 16
+
+ offset = ((addr >> 1) & VG_TT_FAST_MASK) << 4
+ */
+ srlg %r7,%r7,1
+ lghi %r5,VG_TT_FAST_MASK
+ ngr %r7,%r5
+ sllg %r7,%r7,4
+
+ /* Set the return address to the beginning of the loop here to
+ have some instruction between setting r7 and using it as an
+ address */
+ larl LR,run_innerloop__dispatch_unprofiled
+
+ /* Are we out of timeslice? If yes, defer to scheduler. */
+ ahi S390_REGNO_DISPATCH_CTR,-1
+ jz counter_is_zero
+
+
+ lg %r10, 0(%r8,%r7) /* .guest */
+ lg %r11, 8(%r8,%r7) /* .host */
+ cgr %r2, %r10
+ jne fast_lookup_failed
+
+ /* Found a match. Call .host.
+ r11 is an address. There we will find the instrumented client code.
+ That code may modify the guest state register r13. The client code
+ will return to the start of this loop by issuing "br LR".
+ We can simply branch to the host code */
+ br %r11
+
+
+/*----------------------------------------------------*/
+/*--- PROFILING dispatcher (can be much slower) ---*/
+/*----------------------------------------------------*/
+
+run_innerloop__dispatch_profiled:
+
+ /* Has the guest state pointer been messed with? If yes, exit. */
+ cg %r13,S390_LOC_SAVED_GSP /* r13 = actual guest state pointer */
+ larl %r8, VG_(tt_fast)
+ jne gsp_changed
+
+ /* Save the jump address in the guest state */
+ stg %r2,OFFSET_s390x_IA(%r13)
+
+ /* Try a fast lookup in the translation cache */
+ lgr %r7,%r2 /* next guest addr */
+
+ /* Compute offset (not index) into VG_(tt_fast):
+
+ offset = VG_TT_FAST_HASH(addr) * sizeof(FastCacheEntry)
+
+ with VG_TT_FAST_HASH(addr) == (addr >> 1) & VG_TT_FAST_MASK
+ and sizeof(FastCacheEntry) == 16
+
+ offset = ((addr >> 1) & VG_TT_FAST_MASK) << 4
+ */
+ srlg %r7,%r7,1
+ lghi %r5,VG_TT_FAST_MASK
+ ngr %r7,%r5
+ sllg %r7,%r7,4
+
+ /* Set the return address to the beginning of the loop here to
+ have some instruction between setting r7 and using it as an
+ address */
+ larl LR,run_innerloop__dispatch_profiled
+
+ /* Are we out of timeslice? If yes, defer to scheduler. */
+ ahi S390_REGNO_DISPATCH_CTR,-1
+ jz counter_is_zero
+
+ lg %r10, 0(%r8,%r7) /* .guest */
+ lg %r11, 8(%r8,%r7) /* .host */
+ cgr %r2, %r10
+ jne fast_lookup_failed
+
+ /* sizeof(FastCacheEntry) == 16, sizeof(UInt *) == 8 */
+ srlg %r7,%r7,1
+
+ /* we got a hit: VG_(tt_fastN) is guaranteed to point to count */
+ larl %r8, VG_(tt_fastN)
+
+ /* increment bb profile counter */
+ lg %r9,0(%r8,%r7)
+ l %r10,0(%r9)
+ ahi %r10,1
+ st %r10,0(%r9)
+
+ /* Found a match. Call .host.
+ r11 is an address. There we will find the instrumented client code.
+ That code may modify the guest state register r13. The client code
+ will return to the start of this loop by issuing "br LR".
+ We can simply branch to the host code */
+ br %r11
+
+/*----------------------------------------------------*/
+/*--- exit points ---*/
+/*----------------------------------------------------*/
+
+gsp_changed:
+ /* Someone messed with the gsp (in r13). Have to
+ defer to scheduler to resolve this. The register
+ holding VG_(dispatch_ctr) is not yet decremented,
+ so no need to increment. */
+
+ /* Update the IA in the guest state */
+ lg %r6,S390_LOC_SAVED_GSP /* r6 = original guest state pointer */
+ stg %r2,OFFSET_s390x_IA(%r6)
+
+ /* Return the special guest state pointer value */
+ lgr %r2, %r13
+ j run_innerloop_exit
+
+
+counter_is_zero:
+ /* IA is up to date */
+
+ /* Back out decrement of the dispatch counter */
+ ahi S390_REGNO_DISPATCH_CTR,1
+
+ /* Set return value for the scheduler */
+ lghi %r2,VG_TRC_INNER_COUNTERZERO
+ j run_innerloop_exit
+
+
+fast_lookup_failed:
+ /* IA is up to date */
+
+ /* Back out decrement of the dispatch counter */
+ ahi S390_REGNO_DISPATCH_CTR,1
+
+ /* Set return value for the scheduler */
+ lghi %r2,VG_TRC_INNER_FASTMISS
+ j run_innerloop_exit
+
+
+ /* All exits from the dispatcher go through here.
+ When we come here r2 holds the return value. */
+run_innerloop_exit:
+
+ /* Restore valgrind's FPC, as client code may have changed it. */
+ lfpc S390_LOC_SAVED_FPC_V
+
+ /* Write ctr to VG_(dispatch_ctr) (=32bit value) */
+ larl %r6,VG_(dispatch_ctr)
+ st S390_REGNO_DISPATCH_CTR,0(%r6)
+
+ /* Restore callee-saved registers... */
+
+ /* Floating-point regs */
+ ld %f8,160+0(SP)
+ ld %f9,160+8(SP)
+ ld %f10,160+16(SP)
+ ld %f11,160+24(SP)
+ ld %f12,160+32(SP)
+ ld %f13,160+40(SP)
+ ld %f14,160+48(SP)
+ ld %f15,160+56(SP)
+
+ /* Remove stack frame */
+ aghi SP,S390_INNERLOOP_FRAME_SIZE
+
+ /* General-purpose regs. This also restores the original link
+ register (r14) and stack pointer (r15). */
+ lmg %r6,%r15,48(SP)
+
+ /* Return */
+ br LR
+
+/*------------------------------------------------------------*/
+/*--- ---*/
+/*--- A special dispatcher, for running no-redir ---*/
+/*--- translations. Just runs the given translation once. ---*/
+/*--- ---*/
+/*------------------------------------------------------------*/
+
+/* signature:
+void VG_(run_a_noredir_translation) ( UWord* argblock );
+*/
+
+/* Run a no-redir translation. argblock points to 4 UWords, 2 to carry args
+ and 2 to carry results:
+ 0: input: ptr to translation
+ 1: input: ptr to guest state
+ 2: output: next guest PC
+ 3: output: guest state pointer afterwards (== thread return code)
+*/
+.text
+.align 4
+.globl VG_(run_a_noredir_translation)
+VG_(run_a_noredir_translation):
+ stmg %r6,%r15,48(SP)
+ aghi SP,-S390_INNERLOOP_FRAME_SIZE
+ std %f8,160+0(SP)
+ std %f9,160+8(SP)
+ std %f10,160+16(SP)
+ std %f11,160+24(SP)
+ std %f12,160+32(SP)
+ std %f13,160+40(SP)
+ std %f14,160+48(SP)
+ std %f15,160+56(SP)
+
+ /* Load address of guest state into guest state register (r13) */
+ lg %r13,8(%r2)
+
+ /* Get the IA */
+ lg %r11,0(%r2)
+
+ /* save r2 (argblock) as it is clobbered */
+ stg %r2,160+64(SP)
+
+ /* the call itself */
+ basr LR,%r11
+
+ /* restore argblock */
+ lg %r1,160+64(SP)
+ /* save the next guest PC */
+ stg %r2,16(%r1)
+
+ /* save the guest state */
+ stg %r13,24(%r1)
+
+ /* Restore Floating-point regs */
+ ld %f8,160+0(SP)
+ ld %f9,160+8(SP)
+ ld %f10,160+16(SP)
+ ld %f11,160+24(SP)
+ ld %f12,160+32(SP)
+ ld %f13,160+40(SP)
+ ld %f14,160+48(SP)
+ ld %f15,160+56(SP)
+
+ aghi SP,S390_INNERLOOP_FRAME_SIZE
+
+ lmg %r6,%r15,48(SP)
+ br %r14
+
+
+/* Let the linker know we don't need an executable stack */
+.section .note.GNU-stack,"",@progbits
+
+#endif /* VGA_s390x */
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/coregrind/m_sigframe/sigframe-s390x-linux.c
+++ valgrind/coregrind/m_sigframe/sigframe-s390x-linux.c
@@ -0,0 +1,565 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Create/destroy signal delivery frames. ---*/
+/*--- sigframe-s390x-linux.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Christian Borntraeger */
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_machine.h"
+#include "pub_core_options.h"
+#include "pub_core_sigframe.h"
+#include "pub_core_signals.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_trampoline.h"
+
+#if defined(VGA_s390x)
+
+/* This module creates and removes signal frames for signal deliveries
+ on s390x-linux.
+
+ Note, this file contains kernel-specific knowledge in the form of
+ 'struct sigframe' and 'struct rt_sigframe'.
+
+ Either a 'struct sigframe' or a 'struct rtsigframe' is pushed
+ onto the client's stack. This contains a subsidiary
+ vki_ucontext. That holds the vcpu's state across the signal,
+ so that the sighandler can mess with the vcpu state if it
+ really wants.
+*/
+
+#define SET_SIGNAL_GPR(zztst, zzn, zzval) \
+ do { zztst->arch.vex.guest_r##zzn = (unsigned long)(zzval); \
+ VG_TRACK( post_reg_write, Vg_CoreSignal, zztst->tid, \
+ offsetof(VexGuestS390XState,guest_r##zzn), \
+ sizeof(UWord) ); \
+ } while (0)
+
+/*------------------------------------------------------------*/
+/*--- Signal frame layouts ---*/
+/*------------------------------------------------------------*/
+
+// A structure in which to save the application's registers
+// during the execution of signal handlers.
+
+// Linux has 2 signal frame structures: one for normal signal
+// deliveries, and one for SA_SIGINFO deliveries (also known as RT
+// signals).
+//
+// In theory, so long as we get the arguments to the handler function
+// right, it doesn't matter what the exact layout of the rest of the
+// frame is. Unfortunately, things like gcc's exception unwinding
+// make assumptions about the locations of various parts of the frame,
+// so we need to duplicate it exactly.
+
+/* Valgrind-specific parts of the signal frame */
+struct vg_sigframe
+{
+ /* Sanity check word. */
+ UInt magicPI;
+
+ UInt handlerflags; /* flags for signal handler */
+
+
+ /* Safely-saved version of sigNo, as described above. */
+ Int sigNo_private;
+
+ /* XXX This is wrong. Surely we should store the shadow values
+ into the shadow memory behind the actual values? */
+ VexGuestS390XState vex_shadow1;
+ VexGuestS390XState vex_shadow2;
+
+ /* HACK ALERT */
+ VexGuestS390XState vex;
+ /* end HACK ALERT */
+
+ /* saved signal mask to be restored when handler returns */
+ vki_sigset_t mask;
+
+ /* Sanity check word. Is the highest-addressed word; do not
+ move!*/
+ UInt magicE;
+};
+
+#define S390_SYSCALL_SIZE 2
+
+struct sigframe
+{
+ UChar callee_used_stack[__VKI_SIGNAL_FRAMESIZE];
+ struct vki_sigcontext sc;
+ _vki_sigregs sregs;
+ Int sigNo;
+ UChar retcode[S390_SYSCALL_SIZE];
+
+ struct vg_sigframe vg;
+};
+
+struct rt_sigframe
+{
+ UChar callee_used_stack[__VKI_SIGNAL_FRAMESIZE];
+ UChar retcode[S390_SYSCALL_SIZE];
+ struct vki_siginfo info;
+ struct vki_ucontext uc;
+
+ struct vg_sigframe vg;
+};
+
+/*------------------------------------------------------------*/
+/*--- Creating signal frames ---*/
+/*------------------------------------------------------------*/
+
+/* Saves all user-controlled register into a _vki_sigregs structure */
+static void save_sigregs(ThreadState *tst, _vki_sigregs *sigregs)
+{
+ sigregs->regs.gprs[0] = tst->arch.vex.guest_r0;
+ sigregs->regs.gprs[1] = tst->arch.vex.guest_r1;
+ sigregs->regs.gprs[2] = tst->arch.vex.guest_r2;
+ sigregs->regs.gprs[3] = tst->arch.vex.guest_r3;
+ sigregs->regs.gprs[4] = tst->arch.vex.guest_r4;
+ sigregs->regs.gprs[5] = tst->arch.vex.guest_r5;
+ sigregs->regs.gprs[6] = tst->arch.vex.guest_r6;
+ sigregs->regs.gprs[7] = tst->arch.vex.guest_r7;
+ sigregs->regs.gprs[8] = tst->arch.vex.guest_r8;
+ sigregs->regs.gprs[9] = tst->arch.vex.guest_r9;
+ sigregs->regs.gprs[10] = tst->arch.vex.guest_r10;
+ sigregs->regs.gprs[11] = tst->arch.vex.guest_r11;
+ sigregs->regs.gprs[12] = tst->arch.vex.guest_r12;
+ sigregs->regs.gprs[13] = tst->arch.vex.guest_r13;
+ sigregs->regs.gprs[14] = tst->arch.vex.guest_r14;
+ sigregs->regs.gprs[15] = tst->arch.vex.guest_r15;
+
+ sigregs->regs.acrs[0] = tst->arch.vex.guest_a0;
+ sigregs->regs.acrs[1] = tst->arch.vex.guest_a1;
+ sigregs->regs.acrs[2] = tst->arch.vex.guest_a2;
+ sigregs->regs.acrs[3] = tst->arch.vex.guest_a3;
+ sigregs->regs.acrs[4] = tst->arch.vex.guest_a4;
+ sigregs->regs.acrs[5] = tst->arch.vex.guest_a5;
+ sigregs->regs.acrs[6] = tst->arch.vex.guest_a6;
+ sigregs->regs.acrs[7] = tst->arch.vex.guest_a7;
+ sigregs->regs.acrs[8] = tst->arch.vex.guest_a8;
+ sigregs->regs.acrs[9] = tst->arch.vex.guest_a9;
+ sigregs->regs.acrs[10] = tst->arch.vex.guest_a10;
+ sigregs->regs.acrs[11] = tst->arch.vex.guest_a11;
+ sigregs->regs.acrs[12] = tst->arch.vex.guest_a12;
+ sigregs->regs.acrs[13] = tst->arch.vex.guest_a13;
+ sigregs->regs.acrs[14] = tst->arch.vex.guest_a14;
+ sigregs->regs.acrs[15] = tst->arch.vex.guest_a15;
+
+ sigregs->fpregs.fprs[0] = tst->arch.vex.guest_f0;
+ sigregs->fpregs.fprs[1] = tst->arch.vex.guest_f1;
+ sigregs->fpregs.fprs[2] = tst->arch.vex.guest_f2;
+ sigregs->fpregs.fprs[3] = tst->arch.vex.guest_f3;
+ sigregs->fpregs.fprs[4] = tst->arch.vex.guest_f4;
+ sigregs->fpregs.fprs[5] = tst->arch.vex.guest_f5;
+ sigregs->fpregs.fprs[6] = tst->arch.vex.guest_f6;
+ sigregs->fpregs.fprs[7] = tst->arch.vex.guest_f7;
+ sigregs->fpregs.fprs[8] = tst->arch.vex.guest_f8;
+ sigregs->fpregs.fprs[9] = tst->arch.vex.guest_f9;
+ sigregs->fpregs.fprs[10] = tst->arch.vex.guest_f10;
+ sigregs->fpregs.fprs[11] = tst->arch.vex.guest_f11;
+ sigregs->fpregs.fprs[12] = tst->arch.vex.guest_f12;
+ sigregs->fpregs.fprs[13] = tst->arch.vex.guest_f13;
+ sigregs->fpregs.fprs[14] = tst->arch.vex.guest_f14;
+ sigregs->fpregs.fprs[15] = tst->arch.vex.guest_f15;
+ sigregs->fpregs.fpc = tst->arch.vex.guest_fpc;
+
+ sigregs->regs.psw.addr = tst->arch.vex.guest_IA;
+ /* save a sane dummy mask */
+ sigregs->regs.psw.mask = 0x0705000180000000UL;
+}
+
+static void restore_sigregs(ThreadState *tst, _vki_sigregs *sigregs)
+{
+ tst->arch.vex.guest_r0 = sigregs->regs.gprs[0];
+ tst->arch.vex.guest_r1 = sigregs->regs.gprs[1];
+ tst->arch.vex.guest_r2 = sigregs->regs.gprs[2];
+ tst->arch.vex.guest_r3 = sigregs->regs.gprs[3];
+ tst->arch.vex.guest_r4 = sigregs->regs.gprs[4];
+ tst->arch.vex.guest_r5 = sigregs->regs.gprs[5];
+ tst->arch.vex.guest_r6 = sigregs->regs.gprs[6];
+ tst->arch.vex.guest_r7 = sigregs->regs.gprs[7];
+ tst->arch.vex.guest_r8 = sigregs->regs.gprs[8];
+ tst->arch.vex.guest_r9 = sigregs->regs.gprs[9];
+ tst->arch.vex.guest_r10 = sigregs->regs.gprs[10];
+ tst->arch.vex.guest_r11 = sigregs->regs.gprs[11];
+ tst->arch.vex.guest_r12 = sigregs->regs.gprs[12];
+ tst->arch.vex.guest_r13 = sigregs->regs.gprs[13];
+ tst->arch.vex.guest_r14 = sigregs->regs.gprs[14];
+ tst->arch.vex.guest_r15 = sigregs->regs.gprs[15];
+
+ tst->arch.vex.guest_a0 = sigregs->regs.acrs[0];
+ tst->arch.vex.guest_a1 = sigregs->regs.acrs[1];
+ tst->arch.vex.guest_a2 = sigregs->regs.acrs[2];
+ tst->arch.vex.guest_a3 = sigregs->regs.acrs[3];
+ tst->arch.vex.guest_a4 = sigregs->regs.acrs[4];
+ tst->arch.vex.guest_a5 = sigregs->regs.acrs[5];
+ tst->arch.vex.guest_a6 = sigregs->regs.acrs[6];
+ tst->arch.vex.guest_a7 = sigregs->regs.acrs[7];
+ tst->arch.vex.guest_a8 = sigregs->regs.acrs[8];
+ tst->arch.vex.guest_a9 = sigregs->regs.acrs[9];
+ tst->arch.vex.guest_a10 = sigregs->regs.acrs[10];
+ tst->arch.vex.guest_a11 = sigregs->regs.acrs[11];
+ tst->arch.vex.guest_a12 = sigregs->regs.acrs[12];
+ tst->arch.vex.guest_a13 = sigregs->regs.acrs[13];
+ tst->arch.vex.guest_a14 = sigregs->regs.acrs[14];
+ tst->arch.vex.guest_a15 = sigregs->regs.acrs[15];
+
+ tst->arch.vex.guest_f0 = sigregs->fpregs.fprs[0];
+ tst->arch.vex.guest_f1 = sigregs->fpregs.fprs[1];
+ tst->arch.vex.guest_f2 = sigregs->fpregs.fprs[2];
+ tst->arch.vex.guest_f3 = sigregs->fpregs.fprs[3];
+ tst->arch.vex.guest_f4 = sigregs->fpregs.fprs[4];
+ tst->arch.vex.guest_f5 = sigregs->fpregs.fprs[5];
+ tst->arch.vex.guest_f6 = sigregs->fpregs.fprs[6];
+ tst->arch.vex.guest_f7 = sigregs->fpregs.fprs[7];
+ tst->arch.vex.guest_f8 = sigregs->fpregs.fprs[8];
+ tst->arch.vex.guest_f9 = sigregs->fpregs.fprs[9];
+ tst->arch.vex.guest_f10 = sigregs->fpregs.fprs[10];
+ tst->arch.vex.guest_f11 = sigregs->fpregs.fprs[11];
+ tst->arch.vex.guest_f12 = sigregs->fpregs.fprs[12];
+ tst->arch.vex.guest_f13 = sigregs->fpregs.fprs[13];
+ tst->arch.vex.guest_f14 = sigregs->fpregs.fprs[14];
+ tst->arch.vex.guest_f15 = sigregs->fpregs.fprs[15];
+ tst->arch.vex.guest_fpc = sigregs->fpregs.fpc;
+
+ tst->arch.vex.guest_IA = sigregs->regs.psw.addr;
+}
+
+/* Extend the stack segment downwards if needed so as to ensure the
+ new signal frames are mapped to something. Return a Bool
+ indicating whether or not the operation was successful.
+*/
+static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
+{
+ ThreadId tid = tst->tid;
+ NSegment const* stackseg = NULL;
+
+ if (VG_(extend_stack)(addr, tst->client_stack_szB)) {
+ stackseg = VG_(am_find_nsegment)(addr);
+ if (0 && stackseg)
+ VG_(printf)("frame=%#lx seg=%#lx-%#lx\n",
+ addr, stackseg->start, stackseg->end);
+ }
+
+ if (stackseg == NULL || !stackseg->hasR || !stackseg->hasW) {
+ VG_(message)(
+ Vg_UserMsg,
+ "Can't extend stack to %#lx during signal delivery for thread %d:\n",
+ addr, tid);
+ if (stackseg == NULL)
+ VG_(message)(Vg_UserMsg, " no stack segment\n");
+ else
+ VG_(message)(Vg_UserMsg, " too small or bad protection modes\n");
+
+ /* set SIGSEGV to default handler */
+ VG_(set_default_handler)(VKI_SIGSEGV);
+ VG_(synth_fault_mapping)(tid, addr);
+
+ /* The whole process should be about to die, since the default
+      action of SIGSEGV is to kill the whole process. */
+ return False;
+ }
+
+ /* For tracking memory events, indicate the entire frame has been
+ allocated. */
+ VG_TRACK( new_mem_stack_signal, addr - VG_STACK_REDZONE_SZB,
+ size + VG_STACK_REDZONE_SZB, tid );
+
+ return True;
+}
+
+
+/* Build the Valgrind-specific part of a signal frame. */
+
+static void build_vg_sigframe(struct vg_sigframe *frame,
+ ThreadState *tst,
+ UInt flags,
+ Int sigNo)
+{
+ frame->sigNo_private = sigNo;
+ frame->magicPI = 0x31415927;
+ frame->vex_shadow1 = tst->arch.vex_shadow1;
+ frame->vex_shadow2 = tst->arch.vex_shadow2;
+ /* HACK ALERT */
+ frame->vex = tst->arch.vex;
+ /* end HACK ALERT */
+ frame->mask = tst->sig_mask;
+ frame->handlerflags = flags;
+ frame->magicE = 0x27182818;
+}
+
+
+static Addr build_sigframe(ThreadState *tst,
+ Addr sp_top_of_frame,
+ const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *siguc,
+ UInt flags,
+ const vki_sigset_t *mask,
+ void *restorer)
+{
+ struct sigframe *frame;
+ Addr sp = sp_top_of_frame;
+
+ vg_assert((flags & VKI_SA_SIGINFO) == 0);
+
+ sp -= sizeof(*frame);
+ sp = VG_ROUNDDN(sp, 16);
+ frame = (struct sigframe *)sp;
+
+ if (!extend(tst, sp, sizeof(*frame)))
+ return sp_top_of_frame;
+
+ /* retcode, sigNo, sc, sregs fields are to be written */
+ VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
+ sp, offsetof(struct sigframe, vg) );
+
+ save_sigregs(tst, &frame->sregs);
+
+ frame->sigNo = siginfo->si_signo;
+ frame->sc.sregs = &frame->sregs;
+ VG_(memcpy)(frame->sc.oldmask, mask->sig, sizeof(frame->sc.oldmask));
+
+ if (flags & VKI_SA_RESTORER) {
+ SET_SIGNAL_GPR(tst, 14, restorer);
+ } else {
+ frame->retcode[0] = 0x0a;
+ frame->retcode[1] = __NR_sigreturn;
+      /* This normally should be &frame->retcode, but since there
+         might be problems with non-exec stack and we must discard
+         the translation for the on-stack sigreturn we just use the
+         trampoline like x86,ppc. We still fill in the retcode; let's
+         just hope that nobody actually jumps here */
+ SET_SIGNAL_GPR(tst, 14, &VG_(s390x_linux_SUBST_FOR_sigreturn));
+ }
+
+ SET_SIGNAL_GPR(tst, 2, siginfo->si_signo);
+ SET_SIGNAL_GPR(tst, 3, &frame->sc);
+   /* fixs390: we don't fill in trapno and prot_addr in r4 and r5 */
+
+ /* Set up backchain. */
+ *((Addr *) sp) = sp_top_of_frame;
+
+ VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
+ sp, offsetof(struct sigframe, vg) );
+
+ build_vg_sigframe(&frame->vg, tst, flags, siginfo->si_signo);
+
+ return sp;
+}
+
+static Addr build_rt_sigframe(ThreadState *tst,
+ Addr sp_top_of_frame,
+ const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *siguc,
+ UInt flags,
+ const vki_sigset_t *mask,
+ void *restorer)
+{
+ struct rt_sigframe *frame;
+ Addr sp = sp_top_of_frame;
+ Int sigNo = siginfo->si_signo;
+
+ vg_assert((flags & VKI_SA_SIGINFO) != 0);
+ sp -= sizeof(*frame);
+ sp = VG_ROUNDDN(sp, 16);
+ frame = (struct rt_sigframe *)sp;
+
+ if (!extend(tst, sp, sizeof(*frame)))
+ return sp_top_of_frame;
+
+ /* retcode, sigNo, sc, sregs fields are to be written */
+ VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
+ sp, offsetof(struct rt_sigframe, vg) );
+
+ save_sigregs(tst, &frame->uc.uc_mcontext);
+
+ if (flags & VKI_SA_RESTORER) {
+ frame->retcode[0] = 0;
+ frame->retcode[1] = 0;
+ SET_SIGNAL_GPR(tst, 14, restorer);
+ } else {
+ frame->retcode[0] = 0x0a;
+ frame->retcode[1] = __NR_rt_sigreturn;
+      /* This normally should be &frame->retcode, but since there
+         might be problems with non-exec stack and we must discard
+         the translation for the on-stack sigreturn we just use the
+         trampoline like x86,ppc. We still fill in the retcode; let's
+         just hope that nobody actually jumps here */
+ SET_SIGNAL_GPR(tst, 14, &VG_(s390x_linux_SUBST_FOR_rt_sigreturn));
+ }
+
+ VG_(memcpy)(&frame->info, siginfo, sizeof(vki_siginfo_t));
+ frame->uc.uc_flags = 0;
+ frame->uc.uc_link = 0;
+ frame->uc.uc_sigmask = *mask;
+ frame->uc.uc_stack = tst->altstack;
+
+ SET_SIGNAL_GPR(tst, 2, siginfo->si_signo);
+ SET_SIGNAL_GPR(tst, 3, &frame->info);
+ SET_SIGNAL_GPR(tst, 4, &frame->uc);
+
+ /* Set up backchain. */
+ *((Addr *) sp) = sp_top_of_frame;
+
+ VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
+ sp, offsetof(struct rt_sigframe, vg) );
+
+ build_vg_sigframe(&frame->vg, tst, flags, sigNo);
+ return sp;
+}
+
+/* EXPORTED */
+void VG_(sigframe_create)( ThreadId tid,
+ Addr sp_top_of_frame,
+ const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *siguc,
+ void *handler,
+ UInt flags,
+ const vki_sigset_t *mask,
+ void *restorer )
+{
+ Addr sp;
+ ThreadState* tst = VG_(get_ThreadState)(tid);
+
+ if (flags & VKI_SA_SIGINFO)
+ sp = build_rt_sigframe(tst, sp_top_of_frame, siginfo, siguc,
+ flags, mask, restorer);
+ else
+ sp = build_sigframe(tst, sp_top_of_frame, siginfo, siguc,
+ flags, mask, restorer);
+
+ /* Set the thread so it will next run the handler. */
+ VG_(set_SP)(tid, sp);
+ VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));
+
+ tst->arch.vex.guest_IA = (Addr) handler;
+ /* We might have interrupted a repeating instruction that uses the guest
+ counter. Since our VEX requires that a new instruction will see a
+ guest counter == 0, we have to set it here. The old value will be
+ restored by restore_vg_sigframe. */
+ tst->arch.vex.guest_counter = 0;
+   /* This thread needs to be marked runnable, but we leave that for
+      the caller to do. */
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Destroying signal frames ---*/
+/*------------------------------------------------------------*/
+
+/* Return False and don't do anything, just set the client to take a
+ segfault, if it looks like the frame is corrupted. */
+static
+Bool restore_vg_sigframe ( ThreadState *tst,
+ struct vg_sigframe *frame, Int *sigNo )
+{
+ if (frame->magicPI != 0x31415927 ||
+ frame->magicE != 0x27182818) {
+ VG_(message)(Vg_UserMsg, "Thread %d return signal frame "
+ "corrupted. Killing process.\n",
+ tst->tid);
+ VG_(set_default_handler)(VKI_SIGSEGV);
+ VG_(synth_fault)(tst->tid);
+ *sigNo = VKI_SIGSEGV;
+ return False;
+ }
+ tst->sig_mask = frame->mask;
+ tst->tmp_sig_mask = frame->mask;
+ tst->arch.vex_shadow1 = frame->vex_shadow1;
+ tst->arch.vex_shadow2 = frame->vex_shadow2;
+ /* HACK ALERT */
+ tst->arch.vex = frame->vex;
+ /* end HACK ALERT */
+ *sigNo = frame->sigNo_private;
+ return True;
+}
+
+static
+SizeT restore_sigframe ( ThreadState *tst,
+ struct sigframe *frame, Int *sigNo )
+{
+ if (restore_vg_sigframe(tst, &frame->vg, sigNo))
+ restore_sigregs(tst, frame->sc.sregs);
+
+ return sizeof(*frame);
+}
+
+static
+SizeT restore_rt_sigframe ( ThreadState *tst,
+ struct rt_sigframe *frame, Int *sigNo )
+{
+ if (restore_vg_sigframe(tst, &frame->vg, sigNo)) {
+ restore_sigregs(tst, &frame->uc.uc_mcontext);
+ }
+ return sizeof(*frame);
+}
+
+
+/* EXPORTED */
+void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
+{
+ Addr sp;
+ ThreadState* tst;
+ SizeT size;
+ Int sigNo;
+
+ tst = VG_(get_ThreadState)(tid);
+
+ /* Correctly reestablish the frame base address. */
+ sp = tst->arch.vex.guest_SP;
+
+ if (!isRT)
+ size = restore_sigframe(tst, (struct sigframe *)sp, &sigNo);
+ else
+ size = restore_rt_sigframe(tst, (struct rt_sigframe *)sp, &sigNo);
+
+ VG_TRACK( die_mem_stack_signal, sp - VG_STACK_REDZONE_SZB,
+ size + VG_STACK_REDZONE_SZB );
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(
+ Vg_DebugMsg,
+ "VG_(sigframe_destroy) (thread %d): isRT=%d valid magic; IP=%#llx\n",
+ tid, isRT, tst->arch.vex.guest_IA);
+
+ /* tell the tools */
+ VG_TRACK( post_deliver_signal, tid, sigNo );
+}
+
+#endif /* VGA_s390x */
+
+/*--------------------------------------------------------------------*/
+/*--- end sigframe-s390x-linux.c ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/coregrind/m_syswrap/syscall-s390x-linux.S
+++ valgrind/coregrind/m_syswrap/syscall-s390x-linux.S
@@ -0,0 +1,172 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Support for doing system calls. syscall-s390x-linux.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Christian Borntraeger */
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_vkiscnums_asm.h"
+#include "libvex_guest_offsets.h"
+
+#if defined(VGA_s390x)
+
+/*----------------------------------------------------------------*/
+/*
+ Perform a syscall for the client. This will run a syscall
+ with the client's specific per-thread signal mask.
+
+ The structure of this function is such that, if the syscall is
+ interrupted by a signal, we can determine exactly what
+ execution state we were in with respect to the execution of
+ the syscall by examining the value of NIP in the signal
+ handler. This means that we can always do the appropriate
+ thing to precisely emulate the kernel's signal/syscall
+ interactions.
+
+ The syscall number is taken from the argument, since the syscall
+ number can be encoded in the svc instruction itself.
+ The syscall result is written back to guest register r2.
+
+ Returns 0 if the syscall was successfully called (even if the
+      syscall itself failed), or a nonzero value (0x8000 | error)
+      if one of the sigprocmasks failed (there's no way to
+      determine which one failed).  And there's no obvious way to
+ recover from that either, but nevertheless we want to know.
+
+ VG_(fixup_guest_state_after_syscall_interrupted) does the
+ thread state fixup in the case where we were interrupted by a
+ signal.
+
+ Prototype:
+
+ UWord ML_(do_syscall_for_client_WRK)(
+ Int syscallno, // r2
+ void* guest_state, // r3
+ const vki_sigset_t *sysmask, // r4
+ const vki_sigset_t *postmask, // r5
+ Int nsigwords) // r6
+*/
+/* from vki_arch.h */
+#define VKI_SIG_SETMASK 2
+
+#define SP_SAVE 16
+#define SP_R2 SP_SAVE + 0*8
+#define SP_R3 SP_SAVE + 1*8
+#define SP_R4 SP_SAVE + 2*8
+#define SP_R5 SP_SAVE + 3*8
+#define SP_R6 SP_SAVE + 4*8
+#define SP_R7 SP_SAVE + 5*8
+#define SP_R8 SP_SAVE + 6*8
+#define SP_R9 SP_SAVE + 7*8
+
+.align 4
+.globl ML_(do_syscall_for_client_WRK)
+ML_(do_syscall_for_client_WRK):
+1: /* Even though we can't take a signal until the sigprocmask completes,
+ start the range early.
+ If eip is in the range [1,2), the syscall hasn't been started yet */
+
+ /* Set the signal mask which should be current during the syscall. */
+ /* Save and restore all the parameters and all the registers that
+ we clobber (r6-r9) */
+ stmg %r2,%r9, SP_R2(%r15)
+
+ lghi %r2, VKI_SIG_SETMASK /* how */
+ lgr %r3, %r4 /* sysmask */
+ lgr %r4, %r5 /* postmask */
+ lgr %r5, %r6 /* nsigwords */
+ svc __NR_rt_sigprocmask
+ cghi %r2, 0x0
+ jne 7f /* sigprocmask failed */
+
+ /* OK, that worked. Now do the syscall proper. */
+ lg %r9, SP_R3(%r15) /* guest state --> r9 */
+ lg %r2, OFFSET_s390x_r2(%r9) /* guest r2 --> real r2 */
+ lg %r3, OFFSET_s390x_r3(%r9) /* guest r3 --> real r3 */
+ lg %r4, OFFSET_s390x_r4(%r9) /* guest r4 --> real r4 */
+ lg %r5, OFFSET_s390x_r5(%r9) /* guest r5 --> real r5 */
+ lg %r6, OFFSET_s390x_r6(%r9) /* guest r6 --> real r6 */
+ lg %r7, OFFSET_s390x_r7(%r9) /* guest r7 --> real r7 */
+ lg %r1, SP_R2(%r15) /* syscallno -> r1 */
+
+2: svc 0
+
+3:
+ stg %r2, OFFSET_s390x_r2(%r9)
+
+4: /* Re-block signals. If eip is in [4,5), then the syscall
+ is complete and we needn't worry about it. */
+ lghi %r2, VKI_SIG_SETMASK /* how */
+ lg %r3, SP_R5(%r15) /* postmask */
+ lghi %r4, 0x0 /* NULL */
+ lg %r5, SP_R6(%r15) /* nsigwords */
+ svc __NR_rt_sigprocmask
+ cghi %r2, 0x0
+ jne 7f /* sigprocmask failed */
+
+5:	/* everything ok. return 0 and restore the call-saved
+ registers, that we have clobbered */
+ lghi %r2, 0x0
+ lmg %r6,%r9, SP_R6(%r15)
+ br %r14
+
+7: /* some problem. return 0x8000 | error and restore the call-saved
+ registers we have clobbered. */
+ nill %r2, 0x7fff
+ oill %r2, 0x8000
+ lmg %r6,%r9, SP_R6(%r15)
+ br %r14
+
+.section .rodata
+/* export the ranges so that
+ VG_(fixup_guest_state_after_syscall_interrupted) can do the
+ right thing */
+
+.globl ML_(blksys_setup)
+.globl ML_(blksys_restart)
+.globl ML_(blksys_complete)
+.globl ML_(blksys_committed)
+.globl ML_(blksys_finished)
+
+/* the compiler can assume that 8 byte data elements are aligned on 8 byte */
+.align 8
+ML_(blksys_setup): .quad 1b
+ML_(blksys_restart): .quad 2b
+ML_(blksys_complete): .quad 3b
+ML_(blksys_committed): .quad 4b
+ML_(blksys_finished): .quad 5b
+.previous
+
+/* Let the linker know we don't need an executable stack */
+.section .note.GNU-stack,"",@progbits
+
+#endif /* VGA_s390x */
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/coregrind/m_syswrap/syswrap-s390x-linux.c
+++ valgrind/coregrind/m_syswrap/syswrap-s390x-linux.c
@@ -0,0 +1,1524 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Platform-specific syscalls stuff. syswrap-s390x-linux.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Christian Borntraeger */
+
+#if defined(VGP_s390x_linux)
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_scheduler.h"
+#include "pub_core_sigframe.h" // For VG_(sigframe_destroy)()
+#include "pub_core_signals.h"
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_stacks.h" // VG_(register_stack)
+
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h" /* for decls of generic wrappers */
+#include "priv_syswrap-linux.h" /* for decls of linux-ish wrappers */
+#include "priv_syswrap-linux-variants.h" /* decls of linux variant wrappers */
+#include "priv_syswrap-main.h"
+
+
+/* ---------------------------------------------------------------------
+ clone() handling
+ ------------------------------------------------------------------ */
+
+/* Call f(arg1), but first switch stacks, using 'stack' as the new
+ stack, and use 'retaddr' as f's return-to address. Also, clear all
+ the integer registers before entering f.
+ Thought: Why are we clearing the GPRs ? The callee pointed to by f
+ is a regular C function which will play by the ABI rules. So there is
+ no need to zero out the GPRs. If we assumed that f accesses registers at
+ will, then it would make sense to create a defined register state.
+ But then, why only for the GPRs and not the FPRs ? */
+__attribute__((noreturn))
+void ML_(call_on_new_stack_0_1) ( Addr stack,
+ Addr retaddr,
+ void (*f)(Word),
+ Word arg1 );
+/* Upon entering this function we have the following setup:
+ r2 = stack
+ r3 = retaddr
+ r4 = f_desc
+ r5 = arg1
+*/
+asm(
+ ".text\n"
+ ".align 4\n"
+ ".globl vgModuleLocal_call_on_new_stack_0_1\n"
+ ".type vgModuleLocal_call_on_new_stack_0_1, @function\n"
+ "vgModuleLocal_call_on_new_stack_0_1:\n"
+ " lgr %r15,%r2\n" // stack to r15
+ " lgr %r14,%r3\n" // retaddr to r14
+ " lgr %r2,%r5\n" // arg1 to r2
+ // zero all gprs to get a defined state
+ " lghi %r0,0\n"
+ " lghi %r1,0\n"
+ // r2 holds the argument for the callee
+ " lghi %r3,0\n"
+ // r4 holds the callee address
+ " lghi %r5,0\n"
+ " lghi %r6,0\n"
+ " lghi %r7,0\n"
+ " lghi %r8,0\n"
+ " lghi %r9,0\n"
+ " lghi %r10,0\n"
+ " lghi %r11,0\n"
+ " lghi %r12,0\n"
+ " lghi %r13,0\n"
+ // r14 holds the return address for the callee
+ // r15 is the stack pointer
+ " br %r4\n" // jump to f
+ ".previous\n"
+ );
+
+/*
+ Perform a clone system call. clone is strange because it has
+ fork()-like return-twice semantics, so it needs special
+ handling here.
+
+ Upon entry, we have:
+ void* child_stack in r2
+ long flags in r3
+ int* parent_tid in r4
+ int* child_tid in r5
+        void* tlsaddr        in r6
+ Word (*fn)(void *) 160(r15)
+ void *arg 168(r15)
+
+ System call requires:
+ void* child_stack in r2 (sc arg1)
+ long flags in r3 (sc arg2)
+ int* parent_tid in r4 (sc arg3)
+ int* child_tid in r5 (sc arg4)
+ void* tlsaddr in r6 (sc arg5)
+
+ Returns a ULong encoded as: top half is %cr following syscall,
+ low half is syscall return value (r3).
+ */
+#define __NR_CLONE VG_STRINGIFY(__NR_clone)
+#define __NR_EXIT VG_STRINGIFY(__NR_exit)
+
+extern
+ULong do_syscall_clone_s390x_linux ( void *stack,
+ ULong flags,
+ Int *child_tid,
+ Int *parent_tid,
+ Addr tlsaddr,
+ Word (*fn)(void *),
+ void *arg);
+asm(
+ " .text\n"
+ " .align 4\n"
+ "do_syscall_clone_s390x_linux:\n"
+ " lg %r1, 160(%r15)\n" // save fn from parent stack into r1
+ " lg %r0, 168(%r15)\n" // save arg from parent stack into r0
+ " aghi %r2, -160\n" // create stack frame for child
+ // all syscall parameters are already in place (r2-r6)
+ " svc " __NR_CLONE"\n" // clone()
+ " ltgr %r2,%r2\n" // child if retval == 0
+ " jne 1f\n"
+
+ // CHILD - call thread function
+ " lgr %r2, %r0\n" // get arg from r0
+ " basr %r14,%r1\n" // call fn
+
+ // exit. The result is already in r2
+ " svc " __NR_EXIT"\n"
+
+ // Exit returned?!
+ " j +2\n"
+
+ "1:\n" // PARENT or ERROR
+ " br %r14\n"
+ ".previous\n"
+);
+
+#undef __NR_CLONE
+#undef __NR_EXIT
+
+void VG_(cleanup_thread) ( ThreadArchState* arch )
+{
+ /* only used on x86 for descriptor tables */
+}
+
+static void setup_child ( /*OUT*/ ThreadArchState *child,
+ /*IN*/ ThreadArchState *parent )
+{
+ /* We inherit our parent's guest state. */
+ child->vex = parent->vex;
+ child->vex_shadow1 = parent->vex_shadow1;
+ child->vex_shadow2 = parent->vex_shadow2;
+}
+
+
+/*
+ When a client clones, we need to keep track of the new thread. This means:
+   1. allocate a ThreadId+ThreadState+stack for the thread
+
+ 2. initialize the thread's new VCPU state
+
+ 3. create the thread using the same args as the client requested,
+ but using the scheduler entrypoint for IP, and a separate stack
+ for SP.
+ */
+static SysRes do_clone ( ThreadId ptid,
+ Addr sp, ULong flags,
+ Int *parent_tidptr,
+ Int *child_tidptr,
+ Addr tlsaddr)
+{
+ static const Bool debug = False;
+
+ ThreadId ctid = VG_(alloc_ThreadState)();
+ ThreadState* ptst = VG_(get_ThreadState)(ptid);
+ ThreadState* ctst = VG_(get_ThreadState)(ctid);
+ UWord* stack;
+ NSegment const* seg;
+ SysRes res;
+ ULong r2;
+ vki_sigset_t blockall, savedmask;
+
+ VG_(sigfillset)(&blockall);
+
+ vg_assert(VG_(is_running_thread)(ptid));
+ vg_assert(VG_(is_valid_tid)(ctid));
+
+ stack = (UWord*)ML_(allocstack)(ctid);
+ if (stack == NULL) {
+ res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
+ goto out;
+ }
+
+ /* Copy register state
+
+ Both parent and child return to the same place, and the code
+ following the clone syscall works out which is which, so we
+ don't need to worry about it.
+
+ The parent gets the child's new tid returned from clone, but the
+ child gets 0.
+
+ If the clone call specifies a NULL sp for the new thread, then
+ it actually gets a copy of the parent's sp.
+ */
+ setup_child( &ctst->arch, &ptst->arch );
+
+ /* Make sys_clone appear to have returned Success(0) in the
+ child. */
+ ctst->arch.vex.guest_r2 = 0;
+
+ if (sp != 0)
+ ctst->arch.vex.guest_r15 = sp;
+
+ ctst->os_state.parent = ptid;
+
+ /* inherit signal mask */
+ ctst->sig_mask = ptst->sig_mask;
+ ctst->tmp_sig_mask = ptst->sig_mask;
+
+ /* have the parents thread group */
+ ctst->os_state.threadgroup = ptst->os_state.threadgroup;
+
+ /* We don't really know where the client stack is, because its
+ allocated by the client. The best we can do is look at the
+ memory mappings and try to derive some useful information. We
+ assume that esp starts near its highest possible value, and can
+ only go down to the start of the mmaped segment. */
+ seg = VG_(am_find_nsegment)((Addr)sp);
+ if (seg && seg->kind != SkResvn) {
+ ctst->client_stack_highest_word = (Addr)VG_PGROUNDUP(sp);
+ ctst->client_stack_szB = ctst->client_stack_highest_word - seg->start;
+
+ VG_(register_stack)(seg->start, ctst->client_stack_highest_word);
+
+ if (debug)
+ VG_(printf)("tid %d: guessed client stack range %#lx-%#lx\n",
+ ctid, seg->start, VG_PGROUNDUP(sp));
+ } else {
+ VG_(message)(Vg_UserMsg,
+ "!? New thread %d starts with SP(%#lx) unmapped\n",
+ ctid, sp);
+ ctst->client_stack_szB = 0;
+ }
+
+ /* Assume the clone will succeed, and tell any tool that wants to
+ know that this thread has come into existence. If the clone
+ fails, we'll send out a ll_exit notification for it at the out:
+ label below, to clean up. */
+ VG_TRACK ( pre_thread_ll_create, ptid, ctid );
+
+ if (flags & VKI_CLONE_SETTLS) {
+ if (debug)
+ VG_(printf)("clone child has SETTLS: tls at %#lx\n", tlsaddr);
+ ctst->arch.vex.guest_a0 = (UInt) (tlsaddr >> 32);
+ ctst->arch.vex.guest_a1 = (UInt) tlsaddr;
+ }
+ flags &= ~VKI_CLONE_SETTLS;
+
+ /* start the thread with everything blocked */
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);
+
+ /* Create the new thread */
+ r2 = do_syscall_clone_s390x_linux(
+ stack, flags, child_tidptr, parent_tidptr, tlsaddr,
+ ML_(start_thread_NORETURN), &VG_(threads)[ctid]);
+
+ res = VG_(mk_SysRes_s390x_linux)( r2 );
+
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);
+
+ out:
+ if (sr_isError(res)) {
+ /* clone failed */
+ ctst->status = VgTs_Empty;
+ /* oops. Better tell the tool the thread exited in a hurry :-) */
+ VG_TRACK( pre_thread_ll_exit, ctid );
+ }
+
+ return res;
+
+}
+
+
+
+/* ---------------------------------------------------------------------
+ PRE/POST wrappers for s390x/Linux-specific syscalls
+ ------------------------------------------------------------------ */
+
+#define PRE(name) DEFN_PRE_TEMPLATE(s390x_linux, name)
+#define POST(name) DEFN_POST_TEMPLATE(s390x_linux, name)
+
+/* Add prototypes for the wrappers declared here, so that gcc doesn't
+ harass us for not having prototypes. Really this is a kludge --
+ the right thing to do is to make these wrappers 'static' since they
+ aren't visible outside this file, but that requires even more macro
+ magic. */
+
+DECL_TEMPLATE(s390x_linux, sys_ptrace);
+DECL_TEMPLATE(s390x_linux, sys_socketcall);
+DECL_TEMPLATE(s390x_linux, sys_mmap);
+DECL_TEMPLATE(s390x_linux, sys_ipc);
+DECL_TEMPLATE(s390x_linux, sys_clone);
+DECL_TEMPLATE(s390x_linux, sys_sigreturn);
+DECL_TEMPLATE(s390x_linux, sys_rt_sigreturn);
+DECL_TEMPLATE(s390x_linux, sys_fadvise64);
+
+// PEEK TEXT,DATA and USER are common to all architectures
+// PEEKUSR_AREA and POKEUSR_AREA are special, having a memory area
+// containing the real addr, data, and len field pointed to by ARG3
+// instead of ARG4
+PRE(sys_ptrace)
+{
+ PRINT("sys_ptrace ( %ld, %ld, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4);
+ PRE_REG_READ4(int, "ptrace",
+ long, request, long, pid, long, addr, long, data);
+ switch (ARG1) {
+ case VKI_PTRACE_PEEKTEXT:
+ case VKI_PTRACE_PEEKDATA:
+ case VKI_PTRACE_PEEKUSR:
+ PRE_MEM_WRITE( "ptrace(peek)", ARG4,
+ sizeof (long));
+ break;
+ case VKI_PTRACE_GETEVENTMSG:
+ PRE_MEM_WRITE( "ptrace(geteventmsg)", ARG4, sizeof(unsigned long));
+ break;
+ case VKI_PTRACE_GETSIGINFO:
+ PRE_MEM_WRITE( "ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t));
+ break;
+ case VKI_PTRACE_SETSIGINFO:
+ PRE_MEM_READ( "ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t));
+ break;
+ case VKI_PTRACE_PEEKUSR_AREA:
+ {
+ vki_ptrace_area *pa;
+
+ /* Reads a part of the user area into the memory at pa->process_addr */
+ pa = (vki_ptrace_area *) ARG3;
+ PRE_MEM_READ("ptrace(peekusrarea ptrace_area->len)",
+ (unsigned long) &pa->vki_len, sizeof(pa->vki_len));
+ PRE_MEM_READ("ptrace(peekusrarea ptrace_area->kernel_addr)",
+ (unsigned long) &pa->vki_kernel_addr, sizeof(pa->vki_kernel_addr));
+ PRE_MEM_READ("ptrace(peekusrarea ptrace_area->process_addr)",
+ (unsigned long) &pa->vki_process_addr, sizeof(pa->vki_process_addr));
+ PRE_MEM_WRITE("ptrace(peekusrarea *(ptrace_area->process_addr))",
+ pa->vki_process_addr, pa->vki_len);
+ break;
+ }
+ case VKI_PTRACE_POKEUSR_AREA:
+ {
+ vki_ptrace_area *pa;
+
+ /* Updates a part of the user area from the memory at pa->process_addr */
+ pa = (vki_ptrace_area *) ARG3;
+ PRE_MEM_READ("ptrace(pokeusrarea ptrace_area->len)",
+ (unsigned long) &pa->vki_len, sizeof(pa->vki_len));
+ PRE_MEM_READ("ptrace(pokeusrarea ptrace_area->kernel_addr)",
+ (unsigned long) &pa->vki_kernel_addr, sizeof(pa->vki_kernel_addr));
+ PRE_MEM_READ("ptrace(pokeusrarea ptrace_area->process_addr)",
+ (unsigned long) &pa->vki_process_addr, sizeof(pa->vki_process_addr));
+ PRE_MEM_READ("ptrace(pokeusrarea *(ptrace_area->process_addr))",
+ pa->vki_process_addr, pa->vki_len);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+POST(sys_ptrace)
+{
+ switch (ARG1) {
+ case VKI_PTRACE_PEEKTEXT:
+ case VKI_PTRACE_PEEKDATA:
+ case VKI_PTRACE_PEEKUSR:
+ POST_MEM_WRITE( ARG4, sizeof (long));
+ break;
+ case VKI_PTRACE_GETEVENTMSG:
+ POST_MEM_WRITE( ARG4, sizeof(unsigned long));
+ break;
+ case VKI_PTRACE_GETSIGINFO:
+ /* XXX: This is a simplification. Different parts of the
+ * siginfo_t are valid depending on the type of signal.
+ */
+ POST_MEM_WRITE( ARG4, sizeof(vki_siginfo_t));
+ break;
+ case VKI_PTRACE_PEEKUSR_AREA:
+ {
+ vki_ptrace_area *pa;
+
+ pa = (vki_ptrace_area *) ARG3;
+ POST_MEM_WRITE(pa->vki_process_addr, pa->vki_len);
+ }
+ default:
+ break;
+ }
+}
+
+
+PRE(sys_socketcall)
+{
+# define ARG2_0 (((UWord*)ARG2)[0])
+# define ARG2_1 (((UWord*)ARG2)[1])
+# define ARG2_2 (((UWord*)ARG2)[2])
+# define ARG2_3 (((UWord*)ARG2)[3])
+# define ARG2_4 (((UWord*)ARG2)[4])
+# define ARG2_5 (((UWord*)ARG2)[5])
+
+ *flags |= SfMayBlock;
+ PRINT("sys_socketcall ( %ld, %#lx )",ARG1,ARG2);
+ PRE_REG_READ2(long, "socketcall", int, call, unsigned long *, args);
+
+ switch (ARG1 /* request */) {
+
+ case VKI_SYS_SOCKETPAIR:
+ /* int socketpair(int d, int type, int protocol, int sv[2]); */
+ PRE_MEM_READ( "socketcall.socketpair(args)", ARG2, 4*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 4*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_socketpair)( tid, ARG2_0, ARG2_1, ARG2_2, ARG2_3 );
+ break;
+
+ case VKI_SYS_SOCKET:
+ /* int socket(int domain, int type, int protocol); */
+ PRE_MEM_READ( "socketcall.socket(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ break;
+
+ case VKI_SYS_BIND:
+ /* int bind(int sockfd, struct sockaddr *my_addr,
+ int addrlen); */
+ PRE_MEM_READ( "socketcall.bind(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_bind)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_LISTEN:
+ /* int listen(int s, int backlog); */
+ PRE_MEM_READ( "socketcall.listen(args)", ARG2, 2*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 2*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ break;
+
+ case VKI_SYS_ACCEPT: {
+ /* int accept(int s, struct sockaddr *addr, int *addrlen); */
+ PRE_MEM_READ( "socketcall.accept(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_accept)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+ }
+
+ case VKI_SYS_SENDTO:
+ /* int sendto(int s, const void *msg, int len,
+ unsigned int flags,
+ const struct sockaddr *to, int tolen); */
+ PRE_MEM_READ( "socketcall.sendto(args)", ARG2, 6*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 6*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_sendto)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4, ARG2_5 );
+ break;
+
+ case VKI_SYS_SEND:
+ /* int send(int s, const void *msg, size_t len, int flags); */
+ PRE_MEM_READ( "socketcall.send(args)", ARG2, 4*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 4*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_send)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_RECVFROM:
+ /* int recvfrom(int s, void *buf, int len, unsigned int flags,
+ struct sockaddr *from, int *fromlen); */
+ PRE_MEM_READ( "socketcall.recvfrom(args)", ARG2, 6*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 6*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_recvfrom)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4, ARG2_5 );
+ break;
+
+ case VKI_SYS_RECV:
+ /* int recv(int s, void *buf, int len, unsigned int flags); */
+ /* man 2 recv says:
+ The recv call is normally used only on a connected socket
+ (see connect(2)) and is identical to recvfrom with a NULL
+ from parameter.
+ */
+ PRE_MEM_READ( "socketcall.recv(args)", ARG2, 4*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 4*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_recv)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_CONNECT:
+ /* int connect(int sockfd,
+ struct sockaddr *serv_addr, int addrlen ); */
+ PRE_MEM_READ( "socketcall.connect(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_connect)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_SETSOCKOPT:
+ /* int setsockopt(int s, int level, int optname,
+ const void *optval, int optlen); */
+ PRE_MEM_READ( "socketcall.setsockopt(args)", ARG2, 5*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 5*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_setsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4 );
+ break;
+
+ case VKI_SYS_GETSOCKOPT:
+ /* int getsockopt(int s, int level, int optname,
+ void *optval, socklen_t *optlen); */
+ PRE_MEM_READ( "socketcall.getsockopt(args)", ARG2, 5*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 5*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(linux_PRE_sys_getsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4 );
+ break;
+
+ case VKI_SYS_GETSOCKNAME:
+ /* int getsockname(int s, struct sockaddr* name, int* namelen) */
+ PRE_MEM_READ( "socketcall.getsockname(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_getsockname)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_GETPEERNAME:
+ /* int getpeername(int s, struct sockaddr* name, int* namelen) */
+ PRE_MEM_READ( "socketcall.getpeername(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_getpeername)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_SHUTDOWN:
+ /* int shutdown(int s, int how); */
+ PRE_MEM_READ( "socketcall.shutdown(args)", ARG2, 2*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 2*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ break;
+
+ case VKI_SYS_SENDMSG: {
+ /* int sendmsg(int s, const struct msghdr *msg, int flags); */
+ PRE_MEM_READ( "socketcall.sendmsg(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_sendmsg)( tid, ARG2_0, ARG2_1 );
+ break;
+ }
+
+ case VKI_SYS_RECVMSG: {
+ /* int recvmsg(int s, struct msghdr *msg, int flags); */
+ PRE_MEM_READ("socketcall.recvmsg(args)", ARG2, 3*sizeof(Addr) );
+ if (!ML_(valid_client_addr)(ARG2, 3*sizeof(Addr), tid, NULL)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ break;
+ }
+ ML_(generic_PRE_sys_recvmsg)( tid, ARG2_0, ARG2_1 );
+ break;
+ }
+
+ default:
+ VG_(message)(Vg_DebugMsg,"Warning: unhandled socketcall 0x%lx\n",ARG1);
+ SET_STATUS_Failure( VKI_EINVAL );
+ break;
+ }
+# undef ARG2_0
+# undef ARG2_1
+# undef ARG2_2
+# undef ARG2_3
+# undef ARG2_4
+# undef ARG2_5
+}
+
+POST(sys_socketcall)
+{
+# define ARG2_0 (((UWord*)ARG2)[0])
+# define ARG2_1 (((UWord*)ARG2)[1])
+# define ARG2_2 (((UWord*)ARG2)[2])
+# define ARG2_3 (((UWord*)ARG2)[3])
+# define ARG2_4 (((UWord*)ARG2)[4])
+# define ARG2_5 (((UWord*)ARG2)[5])
+
+ SysRes r;
+ vg_assert(SUCCESS);
+ switch (ARG1 /* request */) {
+
+ case VKI_SYS_SOCKETPAIR:
+ r = ML_(generic_POST_sys_socketpair)(
+ tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2, ARG2_3
+ );
+ SET_STATUS_from_SysRes(r);
+ break;
+
+ case VKI_SYS_SOCKET:
+ r = ML_(generic_POST_sys_socket)( tid, VG_(mk_SysRes_Success)(RES) );
+ SET_STATUS_from_SysRes(r);
+ break;
+
+ case VKI_SYS_BIND:
+ /* int bind(int sockfd, struct sockaddr *my_addr,
+ int addrlen); */
+ break;
+
+ case VKI_SYS_LISTEN:
+ /* int listen(int s, int backlog); */
+ break;
+
+ case VKI_SYS_ACCEPT:
+ /* int accept(int s, struct sockaddr *addr, int *addrlen); */
+ r = ML_(generic_POST_sys_accept)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2 );
+ SET_STATUS_from_SysRes(r);
+ break;
+
+ case VKI_SYS_SENDTO:
+ break;
+
+ case VKI_SYS_SEND:
+ break;
+
+ case VKI_SYS_RECVFROM:
+ ML_(generic_POST_sys_recvfrom)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4, ARG2_5 );
+ break;
+
+ case VKI_SYS_RECV:
+ ML_(generic_POST_sys_recv)( tid, RES, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_CONNECT:
+ break;
+
+ case VKI_SYS_SETSOCKOPT:
+ break;
+
+ case VKI_SYS_GETSOCKOPT:
+ ML_(linux_POST_sys_getsockopt)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1,
+ ARG2_2, ARG2_3, ARG2_4 );
+ break;
+
+ case VKI_SYS_GETSOCKNAME:
+ ML_(generic_POST_sys_getsockname)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_GETPEERNAME:
+ ML_(generic_POST_sys_getpeername)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_SHUTDOWN:
+ break;
+
+ case VKI_SYS_SENDMSG:
+ break;
+
+ case VKI_SYS_RECVMSG:
+ ML_(generic_POST_sys_recvmsg)( tid, ARG2_0, ARG2_1 );
+ break;
+
+ default:
+ VG_(message)(Vg_DebugMsg,"FATAL: unhandled socketcall 0x%lx\n",ARG1);
+ VG_(core_panic)("... bye!\n");
+ break; /*NOTREACHED*/
+ }
+# undef ARG2_0
+# undef ARG2_1
+# undef ARG2_2
+# undef ARG2_3
+# undef ARG2_4
+# undef ARG2_5
+}
+
+PRE(sys_mmap)
+{
+ UWord a0, a1, a2, a3, a4, a5;
+ SysRes r;
+
+ UWord* args = (UWord*)ARG1;
+ PRE_REG_READ1(long, "sys_mmap", struct mmap_arg_struct *, args);
+ PRE_MEM_READ( "sys_mmap(args)", (Addr) args, 6*sizeof(UWord) );
+
+ a0 = args[0];
+ a1 = args[1];
+ a2 = args[2];
+ a3 = args[3];
+ a4 = args[4];
+ a5 = args[5];
+
+ PRINT("sys_mmap ( %#lx, %llu, %ld, %ld, %ld, %ld )",
+ a0, (ULong)a1, a2, a3, a4, a5 );
+
+ r = ML_(generic_PRE_sys_mmap)( tid, a0, a1, a2, a3, a4, (Off64T)a5 );
+ SET_STATUS_from_SysRes(r);
+}
+
+static Addr deref_Addr ( ThreadId tid, Addr a, Char* s )
+{
+ Addr* a_p = (Addr*)a;
+ PRE_MEM_READ( s, (Addr)a_p, sizeof(Addr) );
+ return *a_p;
+}
+
+PRE(sys_ipc)
+{
+ PRINT("sys_ipc ( %ld, %ld, %ld, %ld, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
+ // XXX: this is simplistic -- some args are not used in all circumstances.
+ PRE_REG_READ6(int, "ipc",
+ vki_uint, call, int, first, int, second, int, third,
+ void *, ptr, long, fifth)
+
+ switch (ARG1 /* call */) {
+ case VKI_SEMOP:
+ ML_(generic_PRE_sys_semop)( tid, ARG2, ARG5, ARG3 );
+ *flags |= SfMayBlock;
+ break;
+ case VKI_SEMGET:
+ break;
+ case VKI_SEMCTL:
+ {
+ UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" );
+ ML_(generic_PRE_sys_semctl)( tid, ARG2, ARG3, ARG4, arg );
+ break;
+ }
+ case VKI_SEMTIMEDOP:
+ ML_(generic_PRE_sys_semtimedop)( tid, ARG2, ARG5, ARG3, ARG6 );
+ *flags |= SfMayBlock;
+ break;
+ case VKI_MSGSND:
+ ML_(linux_PRE_sys_msgsnd)( tid, ARG2, ARG5, ARG3, ARG4 );
+ if ((ARG4 & VKI_IPC_NOWAIT) == 0)
+ *flags |= SfMayBlock;
+ break;
+ case VKI_MSGRCV:
+ {
+ Addr msgp;
+ Word msgtyp;
+
+ msgp = deref_Addr( tid,
+ (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp),
+ "msgrcv(msgp)" );
+ msgtyp = deref_Addr( tid,
+ (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp),
+                           "msgrcv(msgtyp)" );
+
+ ML_(linux_PRE_sys_msgrcv)( tid, ARG2, msgp, ARG3, msgtyp, ARG4 );
+
+ if ((ARG4 & VKI_IPC_NOWAIT) == 0)
+ *flags |= SfMayBlock;
+ break;
+ }
+ case VKI_MSGGET:
+ break;
+ case VKI_MSGCTL:
+ ML_(linux_PRE_sys_msgctl)( tid, ARG2, ARG3, ARG5 );
+ break;
+ case VKI_SHMAT:
+ {
+ UWord w;
+ PRE_MEM_WRITE( "shmat(raddr)", ARG4, sizeof(Addr) );
+ w = ML_(generic_PRE_sys_shmat)( tid, ARG2, ARG5, ARG3 );
+ if (w == 0)
+ SET_STATUS_Failure( VKI_EINVAL );
+ else
+ ARG5 = w;
+ break;
+ }
+ case VKI_SHMDT:
+ if (!ML_(generic_PRE_sys_shmdt)(tid, ARG5))
+ SET_STATUS_Failure( VKI_EINVAL );
+ break;
+ case VKI_SHMGET:
+ break;
+ case VKI_SHMCTL: /* IPCOP_shmctl */
+ ML_(generic_PRE_sys_shmctl)( tid, ARG2, ARG3, ARG5 );
+ break;
+ default:
+ VG_(message)(Vg_DebugMsg, "FATAL: unhandled syscall(ipc) %ld", ARG1 );
+ VG_(core_panic)("... bye!\n");
+ break; /*NOTREACHED*/
+ }
+}
+
+POST(sys_ipc)
+{
+ vg_assert(SUCCESS);
+ switch (ARG1 /* call */) {
+ case VKI_SEMOP:
+ case VKI_SEMGET:
+ break;
+ case VKI_SEMCTL:
+ {
+ UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" );
+         ML_(generic_POST_sys_semctl)( tid, RES, ARG2, ARG3, ARG4, arg );
+ break;
+ }
+ case VKI_SEMTIMEDOP:
+ case VKI_MSGSND:
+ break;
+ case VKI_MSGRCV:
+ {
+ Addr msgp;
+ Word msgtyp;
+
+ msgp = deref_Addr( tid,
+ (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp),
+ "msgrcv(msgp)" );
+ msgtyp = deref_Addr( tid,
+ (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp),
+                           "msgrcv(msgtyp)" );
+
+ ML_(linux_POST_sys_msgrcv)( tid, RES, ARG2, msgp, ARG3, msgtyp, ARG4 );
+ break;
+ }
+ case VKI_MSGGET:
+ break;
+ case VKI_MSGCTL:
+ ML_(linux_POST_sys_msgctl)( tid, RES, ARG2, ARG3, ARG5 );
+ break;
+ case VKI_SHMAT:
+ {
+ Addr addr;
+
+ /* force readability. before the syscall it is
+ * indeed uninitialized, as can be seen in
+ * glibc/sysdeps/unix/sysv/linux/shmat.c */
+ POST_MEM_WRITE( ARG4, sizeof( Addr ) );
+
+ addr = deref_Addr ( tid, ARG4, "shmat(addr)" );
+ ML_(generic_POST_sys_shmat)( tid, addr, ARG2, ARG5, ARG3 );
+ break;
+ }
+ case VKI_SHMDT:
+ ML_(generic_POST_sys_shmdt)( tid, RES, ARG5 );
+ break;
+ case VKI_SHMGET:
+ break;
+ case VKI_SHMCTL:
+ ML_(generic_POST_sys_shmctl)( tid, RES, ARG2, ARG3, ARG5 );
+ break;
+ default:
+ VG_(message)(Vg_DebugMsg,
+ "FATAL: unhandled syscall(ipc) %ld",
+ ARG1 );
+ VG_(core_panic)("... bye!\n");
+ break; /*NOTREACHED*/
+ }
+}
+
+PRE(sys_clone)
+{
+ UInt cloneflags;
+
+ PRINT("sys_clone ( %lx, %#lx, %#lx, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4, ARG5);
+ PRE_REG_READ4(int, "clone",
+ void *, child_stack,
+ unsigned long, flags,
+ int *, parent_tidptr,
+ int *, child_tidptr);
+
+ if (ARG2 & VKI_CLONE_PARENT_SETTID) {
+ PRE_MEM_WRITE("clone(parent_tidptr)", ARG3, sizeof(Int));
+ if (!VG_(am_is_valid_for_client)(ARG3, sizeof(Int),
+ VKI_PROT_WRITE)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ return;
+ }
+ }
+ if (ARG2 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID)) {
+ PRE_MEM_WRITE("clone(child_tidptr)", ARG4, sizeof(Int));
+ if (!VG_(am_is_valid_for_client)(ARG4, sizeof(Int),
+ VKI_PROT_WRITE)) {
+ SET_STATUS_Failure( VKI_EFAULT );
+ return;
+ }
+ }
+
+ cloneflags = ARG2;
+
+ if (!ML_(client_signal_OK)(ARG2 & VKI_CSIGNAL)) {
+ SET_STATUS_Failure( VKI_EINVAL );
+ return;
+ }
+
+ /* Only look at the flags we really care about */
+ switch (cloneflags & (VKI_CLONE_VM | VKI_CLONE_FS
+ | VKI_CLONE_FILES | VKI_CLONE_VFORK)) {
+ case VKI_CLONE_VM | VKI_CLONE_FS | VKI_CLONE_FILES:
+ /* thread creation */
+ SET_STATUS_from_SysRes(
+ do_clone(tid,
+ (Addr)ARG1, /* child SP */
+ ARG2, /* flags */
+ (Int *)ARG3, /* parent_tidptr */
+ (Int *)ARG4, /* child_tidptr */
+ (Addr)ARG5)); /* tlsaddr */
+ break;
+
+ case VKI_CLONE_VFORK | VKI_CLONE_VM: /* vfork */
+ /* FALLTHROUGH - assume vfork == fork */
+ cloneflags &= ~(VKI_CLONE_VFORK | VKI_CLONE_VM);
+
+ case 0: /* plain fork */
+ SET_STATUS_from_SysRes(
+ ML_(do_fork_clone)(tid,
+ cloneflags, /* flags */
+ (Int *)ARG3, /* parent_tidptr */
+ (Int *)ARG4)); /* child_tidptr */
+ break;
+
+ default:
+ /* should we just ENOSYS? */
+ VG_(message)(Vg_UserMsg, "Unsupported clone() flags: 0x%lx", ARG2);
+ VG_(message)(Vg_UserMsg, "");
+ VG_(message)(Vg_UserMsg, "The only supported clone() uses are:");
+ VG_(message)(Vg_UserMsg, " - via a threads library (LinuxThreads or NPTL)");
+ VG_(message)(Vg_UserMsg, " - via the implementation of fork or vfork");
+ VG_(unimplemented)
+ ("Valgrind does not support general clone().");
+ }
+
+ if (SUCCESS) {
+ if (ARG2 & VKI_CLONE_PARENT_SETTID)
+ POST_MEM_WRITE(ARG3, sizeof(Int));
+ if (ARG2 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID))
+ POST_MEM_WRITE(ARG4, sizeof(Int));
+
+ /* Thread creation was successful; let the child have the chance
+ to run */
+ *flags |= SfYieldAfter;
+ }
+}
+
+PRE(sys_sigreturn)
+{
+ ThreadState* tst;
+ PRINT("sys_sigreturn ( )");
+
+ vg_assert(VG_(is_valid_tid)(tid));
+ vg_assert(tid >= 1 && tid < VG_N_THREADS);
+ vg_assert(VG_(is_running_thread)(tid));
+
+ tst = VG_(get_ThreadState)(tid);
+
+ /* This is only so that the EIP is (might be) useful to report if
+ something goes wrong in the sigreturn */
+ ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);
+
+ /* Restore register state from frame and remove it */
+ VG_(sigframe_destroy)(tid, False);
+
+ /* Tell the driver not to update the guest state with the "result",
+ and set a bogus result to keep it happy. */
+ *flags |= SfNoWriteResult;
+ SET_STATUS_Success(0);
+
+ /* Check to see if any signals arose as a result of this. */
+ *flags |= SfPollAfter;
+}
+
+
+PRE(sys_rt_sigreturn)
+{
+ /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
+ an explanation of what follows. */
+
+ ThreadState* tst;
+ PRINT("sys_rt_sigreturn ( )");
+
+ vg_assert(VG_(is_valid_tid)(tid));
+ vg_assert(tid >= 1 && tid < VG_N_THREADS);
+ vg_assert(VG_(is_running_thread)(tid));
+
+ tst = VG_(get_ThreadState)(tid);
+
+ /* This is only so that the EIP is (might be) useful to report if
+ something goes wrong in the sigreturn */
+ ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);
+
+ /* Restore register state from frame and remove it */
+ VG_(sigframe_destroy)(tid, True);
+
+ /* Tell the driver not to update the guest state with the "result",
+ and set a bogus result to keep it happy. */
+ *flags |= SfNoWriteResult;
+ SET_STATUS_Success(0);
+
+ /* Check to see if any signals arose as a result of this. */
+ *flags |= SfPollAfter;
+}
+
+/* we can't use the LINX_ version for 64 bit */
+PRE(sys_fadvise64)
+{
+ PRINT("sys_fadvise64 ( %ld, %ld, %ld, %ld )", ARG1,ARG2,ARG3,ARG4);
+ PRE_REG_READ4(long, "fadvise64",
+ int, fd, vki_loff_t, offset, vki_loff_t, len, int, advice);
+}
+
+#undef PRE
+#undef POST
+
+/* ---------------------------------------------------------------------
+ The s390x/Linux syscall table
+ ------------------------------------------------------------------ */
+
+/* Add an s390x-linux specific wrapper to a syscall table. */
+#define PLAX_(sysno, name) WRAPPER_ENTRY_X_(s390x_linux, sysno, name)
+#define PLAXY(sysno, name) WRAPPER_ENTRY_XY(s390x_linux, sysno, name)
+
+// This table maps from __NR_xxx syscall numbers from
+// linux/arch/s390/kernel/syscalls.S to the appropriate PRE/POST sys_foo()
+// wrappers on s390x. There are several unused numbers, which are only
+// defined on s390 (31bit mode) but no longer available on s390x (64 bit).
+// For those syscalls not handled by Valgrind, the annotation indicate its
+// arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
+// (unknown).
+
+static SyscallTableEntry syscall_table[] = {
+ GENX_(0, sys_ni_syscall), /* unimplemented (by the kernel) */ // 0
+ GENX_(__NR_exit, sys_exit), // 1
+ GENX_(__NR_fork, sys_fork), // 2
+ GENXY(__NR_read, sys_read), // 3
+ GENX_(__NR_write, sys_write), // 4
+
+ GENXY(__NR_open, sys_open), // 5
+ GENXY(__NR_close, sys_close), // 6
+// ?????(__NR_restart_syscall, ), // 7
+ GENXY(__NR_creat, sys_creat), // 8
+ GENX_(__NR_link, sys_link), // 9
+
+ GENX_(__NR_unlink, sys_unlink), // 10
+ GENX_(__NR_execve, sys_execve), // 11
+ GENX_(__NR_chdir, sys_chdir), // 12
+ GENX_(13, sys_ni_syscall), /* unimplemented (by the kernel) */ // 13
+ GENX_(__NR_mknod, sys_mknod), // 14
+
+ GENX_(__NR_chmod, sys_chmod), // 15
+ GENX_(16, sys_ni_syscall), /* unimplemented (by the kernel) */ // 16
+ GENX_(17, sys_ni_syscall), /* unimplemented (by the kernel) */ // 17
+ GENX_(18, sys_ni_syscall), /* unimplemented (by the kernel) */ // 18
+ LINX_(__NR_lseek, sys_lseek), // 19
+
+ GENX_(__NR_getpid, sys_getpid), // 20
+ LINX_(__NR_mount, sys_mount), // 21
+ LINX_(__NR_umount, sys_oldumount), // 22
+ GENX_(23, sys_ni_syscall), /* unimplemented (by the kernel) */ // 23
+ GENX_(24, sys_ni_syscall), /* unimplemented (by the kernel) */ // 24
+
+ GENX_(25, sys_ni_syscall), /* unimplemented (by the kernel) */ // 25
+ PLAXY(__NR_ptrace, sys_ptrace), // 26
+ GENX_(__NR_alarm, sys_alarm), // 27
+ GENX_(28, sys_ni_syscall), /* unimplemented (by the kernel) */ // 28
+ GENX_(__NR_pause, sys_pause), // 29
+
+ LINX_(__NR_utime, sys_utime), // 30
+ GENX_(31, sys_ni_syscall), /* unimplemented (by the kernel) */ // 31
+ GENX_(32, sys_ni_syscall), /* unimplemented (by the kernel) */ // 32
+ GENX_(__NR_access, sys_access), // 33
+ GENX_(__NR_nice, sys_nice), // 34
+
+ GENX_(35, sys_ni_syscall), /* unimplemented (by the kernel) */ // 35
+ GENX_(__NR_sync, sys_sync), // 36
+ GENX_(__NR_kill, sys_kill), // 37
+ GENX_(__NR_rename, sys_rename), // 38
+ GENX_(__NR_mkdir, sys_mkdir), // 39
+
+ GENX_(__NR_rmdir, sys_rmdir), // 40
+ GENXY(__NR_dup, sys_dup), // 41
+ LINXY(__NR_pipe, sys_pipe), // 42
+ GENXY(__NR_times, sys_times), // 43
+ GENX_(44, sys_ni_syscall), /* unimplemented (by the kernel) */ // 44
+
+ GENX_(__NR_brk, sys_brk), // 45
+ GENX_(46, sys_ni_syscall), /* unimplemented (by the kernel) */ // 46
+ GENX_(47, sys_ni_syscall), /* unimplemented (by the kernel) */ // 47
+// ?????(__NR_signal, ), // 48
+ GENX_(49, sys_ni_syscall), /* unimplemented (by the kernel) */ // 49
+
+ GENX_(50, sys_ni_syscall), /* unimplemented (by the kernel) */ // 50
+ GENX_(__NR_acct, sys_acct), // 51
+ LINX_(__NR_umount2, sys_umount), // 52
+ GENX_(53, sys_ni_syscall), /* unimplemented (by the kernel) */ // 53
+ LINXY(__NR_ioctl, sys_ioctl), // 54
+
+ LINXY(__NR_fcntl, sys_fcntl), // 55
+ GENX_(56, sys_ni_syscall), /* unimplemented (by the kernel) */ // 56
+ GENX_(__NR_setpgid, sys_setpgid), // 57
+ GENX_(58, sys_ni_syscall), /* unimplemented (by the kernel) */ // 58
+ GENX_(59, sys_ni_syscall), /* unimplemented (by the kernel) */ // 59
+
+ GENX_(__NR_umask, sys_umask), // 60
+ GENX_(__NR_chroot, sys_chroot), // 61
+// ?????(__NR_ustat, sys_ustat), /* deprecated in favor of statfs */ // 62
+ GENXY(__NR_dup2, sys_dup2), // 63
+ GENX_(__NR_getppid, sys_getppid), // 64
+
+ GENX_(__NR_getpgrp, sys_getpgrp), // 65
+ GENX_(__NR_setsid, sys_setsid), // 66
+// ?????(__NR_sigaction, ), /* userspace uses rt_sigaction */ // 67
+ GENX_(68, sys_ni_syscall), /* unimplemented (by the kernel) */ // 68
+ GENX_(69, sys_ni_syscall), /* unimplemented (by the kernel) */ // 69
+
+ GENX_(70, sys_ni_syscall), /* unimplemented (by the kernel) */ // 70
+ GENX_(71, sys_ni_syscall), /* unimplemented (by the kernel) */ // 71
+// ?????(__NR_sigsuspend, ), // 72
+// ?????(__NR_sigpending, ), // 73
+// ?????(__NR_sethostname, ), // 74
+
+ GENX_(__NR_setrlimit, sys_setrlimit), // 75
+ GENXY(76, sys_getrlimit), /* see also 191 */ // 76
+ GENXY(__NR_getrusage, sys_getrusage), // 77
+ GENXY(__NR_gettimeofday, sys_gettimeofday), // 78
+ GENX_(__NR_settimeofday, sys_settimeofday), // 79
+
+ GENX_(80, sys_ni_syscall), /* unimplemented (by the kernel) */ // 80
+ GENX_(81, sys_ni_syscall), /* unimplemented (by the kernel) */ // 81
+ GENX_(82, sys_ni_syscall), /* unimplemented (by the kernel) */ // 82
+ GENX_(__NR_symlink, sys_symlink), // 83
+ GENX_(84, sys_ni_syscall), /* unimplemented (by the kernel) */ // 84
+
+ GENX_(__NR_readlink, sys_readlink), // 85
+// ?????(__NR_uselib, ), // 86
+// ?????(__NR_swapon, ), // 87
+// ?????(__NR_reboot, ), // 88
+ GENX_(89, sys_ni_syscall), /* unimplemented (by the kernel) */ // 89
+
+ PLAX_(__NR_mmap, sys_mmap ), // 90
+ GENXY(__NR_munmap, sys_munmap), // 91
+ GENX_(__NR_truncate, sys_truncate), // 92
+ GENX_(__NR_ftruncate, sys_ftruncate), // 93
+ GENX_(__NR_fchmod, sys_fchmod), // 94
+
+ GENX_(95, sys_ni_syscall), /* unimplemented (by the kernel) */ // 95
+ GENX_(__NR_getpriority, sys_getpriority), // 96
+ GENX_(__NR_setpriority, sys_setpriority), // 97
+ GENX_(98, sys_ni_syscall), /* unimplemented (by the kernel) */ // 98
+ GENXY(__NR_statfs, sys_statfs), // 99
+
+ GENXY(__NR_fstatfs, sys_fstatfs), // 100
+ GENX_(101, sys_ni_syscall), /* unimplemented (by the kernel) */ // 101
+ PLAXY(__NR_socketcall, sys_socketcall), // 102
+ LINXY(__NR_syslog, sys_syslog), // 103
+ GENXY(__NR_setitimer, sys_setitimer), // 104
+
+ GENXY(__NR_getitimer, sys_getitimer), // 105
+ GENXY(__NR_stat, sys_newstat), // 106
+ GENXY(__NR_lstat, sys_newlstat), // 107
+ GENXY(__NR_fstat, sys_newfstat), // 108
+ GENX_(109, sys_ni_syscall), /* unimplemented (by the kernel) */ // 109
+
+ LINXY(__NR_lookup_dcookie, sys_lookup_dcookie), // 110
+ LINX_(__NR_vhangup, sys_vhangup), // 111
+ GENX_(112, sys_ni_syscall), /* unimplemented (by the kernel) */ // 112
+ GENX_(113, sys_ni_syscall), /* unimplemented (by the kernel) */ // 113
+ GENXY(__NR_wait4, sys_wait4), // 114
+
+// ?????(__NR_swapoff, ), // 115
+ LINXY(__NR_sysinfo, sys_sysinfo), // 116
+ PLAXY(__NR_ipc, sys_ipc), // 117
+ GENX_(__NR_fsync, sys_fsync), // 118
+ PLAX_(__NR_sigreturn, sys_sigreturn), // 119
+
+ PLAX_(__NR_clone, sys_clone), // 120
+// ?????(__NR_setdomainname, ), // 121
+ GENXY(__NR_uname, sys_newuname), // 122
+ GENX_(123, sys_ni_syscall), /* unimplemented (by the kernel) */ // 123
+// ?????(__NR_adjtimex, ), // 124
+
+ GENXY(__NR_mprotect, sys_mprotect), // 125
+// LINXY(__NR_sigprocmask, sys_sigprocmask), // 126
+ GENX_(127, sys_ni_syscall), /* unimplemented (by the kernel) */ // 127
+ LINX_(__NR_init_module, sys_init_module), // 128
+ LINX_(__NR_delete_module, sys_delete_module), // 129
+
+ GENX_(130, sys_ni_syscall), /* unimplemented (by the kernel) */ // 130
+ LINX_(__NR_quotactl, sys_quotactl), // 131
+ GENX_(__NR_getpgid, sys_getpgid), // 132
+ GENX_(__NR_fchdir, sys_fchdir), // 133
+// ?????(__NR_bdflush, ), // 134
+
+// ?????(__NR_sysfs, ), // 135
+ LINX_(__NR_personality, sys_personality), // 136
+ GENX_(137, sys_ni_syscall), /* unimplemented (by the kernel) */ // 137
+ GENX_(138, sys_ni_syscall), /* unimplemented (by the kernel) */ // 138
+ GENX_(139, sys_ni_syscall), /* unimplemented (by the kernel) */ // 139
+
+// LINXY(__NR__llseek, sys_llseek), /* 64 bit --> lseek */ // 140
+ GENXY(__NR_getdents, sys_getdents), // 141
+ GENX_(__NR_select, sys_select), // 142
+ GENX_(__NR_flock, sys_flock), // 143
+ GENX_(__NR_msync, sys_msync), // 144
+
+ GENXY(__NR_readv, sys_readv), // 145
+ GENX_(__NR_writev, sys_writev), // 146
+ GENX_(__NR_getsid, sys_getsid), // 147
+ GENX_(__NR_fdatasync, sys_fdatasync), // 148
+ LINXY(__NR__sysctl, sys_sysctl), // 149
+
+ GENX_(__NR_mlock, sys_mlock), // 150
+ GENX_(__NR_munlock, sys_munlock), // 151
+ GENX_(__NR_mlockall, sys_mlockall), // 152
+ LINX_(__NR_munlockall, sys_munlockall), // 153
+ LINXY(__NR_sched_setparam, sys_sched_setparam), // 154
+
+ LINXY(__NR_sched_getparam, sys_sched_getparam), // 155
+ LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156
+ LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157
+ LINX_(__NR_sched_yield, sys_sched_yield), // 158
+ LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max), // 159
+
+ LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min), // 160
+// ?????(__NR_sched_rr_get_interval, ), // 161
+ GENXY(__NR_nanosleep, sys_nanosleep), // 162
+ GENX_(__NR_mremap, sys_mremap), // 163
+ GENX_(164, sys_ni_syscall), /* unimplemented (by the kernel) */ // 164
+
+ GENX_(165, sys_ni_syscall), /* unimplemented (by the kernel) */ // 165
+ GENX_(166, sys_ni_syscall), /* unimplemented (by the kernel) */ // 166
+ GENX_(167, sys_ni_syscall), /* unimplemented (by the kernel) */ // 167
+ GENXY(__NR_poll, sys_poll), // 168
+// ?????(__NR_nfsservctl, ), // 169
+
+ GENX_(170, sys_ni_syscall), /* unimplemented (by the kernel) */ // 170
+ GENX_(171, sys_ni_syscall), /* unimplemented (by the kernel) */ // 171
+ LINXY(__NR_prctl, sys_prctl), // 172
+ PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn), // 173
+ LINXY(__NR_rt_sigaction, sys_rt_sigaction), // 174
+
+ LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 175
+ LINXY(__NR_rt_sigpending, sys_rt_sigpending), // 176
+ LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait), // 177
+ LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo), // 178
+ LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend), // 179
+
+ GENXY(__NR_pread64, sys_pread64), // 180
+ GENX_(__NR_pwrite64, sys_pwrite64), // 181
+ GENX_(182, sys_ni_syscall), /* unimplemented (by the kernel) */ // 182
+ GENXY(__NR_getcwd, sys_getcwd), // 183
+ LINXY(__NR_capget, sys_capget), // 184
+
+ LINX_(__NR_capset, sys_capset), // 185
+ GENXY(__NR_sigaltstack, sys_sigaltstack), // 186
+ LINXY(__NR_sendfile, sys_sendfile), // 187
+ GENX_(188, sys_ni_syscall), /* unimplemented (by the kernel) */ // 188
+ GENX_(189, sys_ni_syscall), /* unimplemented (by the kernel) */ // 189
+
+ GENX_(__NR_vfork, sys_fork), // 190
+ GENXY(__NR_getrlimit, sys_getrlimit), // 191
+ GENX_(192, sys_ni_syscall), /* not exported on 64bit*/ // 192
+ GENX_(193, sys_ni_syscall), /* unimplemented (by the kernel) */ // 193
+ GENX_(194, sys_ni_syscall), /* unimplemented (by the kernel) */ // 194
+
+ GENX_(195, sys_ni_syscall), /* unimplemented (by the kernel) */ // 195
+ GENX_(196, sys_ni_syscall), /* unimplemented (by the kernel) */ // 196
+ GENX_(197, sys_ni_syscall), /* unimplemented (by the kernel) */ // 197
+ GENX_(__NR_lchown, sys_lchown), // 198
+ GENX_(__NR_getuid, sys_getuid), // 199
+
+ GENX_(__NR_getgid, sys_getgid), // 200
+ GENX_(__NR_geteuid, sys_geteuid), // 201
+ GENX_(__NR_getegid, sys_getegid), // 202
+ GENX_(__NR_setreuid, sys_setreuid), // 203
+ GENX_(__NR_setregid, sys_setregid), // 204
+
+ GENXY(__NR_getgroups, sys_getgroups), // 205
+ GENX_(__NR_setgroups, sys_setgroups), // 206
+ GENX_(__NR_fchown, sys_fchown), // 207
+ LINX_(__NR_setresuid, sys_setresuid), // 208
+ LINXY(__NR_getresuid, sys_getresuid), // 209
+
+ LINX_(__NR_setresgid, sys_setresgid), // 210
+ LINXY(__NR_getresgid, sys_getresgid), // 211
+ GENX_(__NR_chown, sys_chown), // 212
+ GENX_(__NR_setuid, sys_setuid), // 213
+ GENX_(__NR_setgid, sys_setgid), // 214
+
+ LINX_(__NR_setfsuid, sys_setfsuid), // 215
+ LINX_(__NR_setfsgid, sys_setfsgid), // 216
+// ?????(__NR_pivot_root, ),
+ GENX_(__NR_mincore, sys_mincore), // 218
+ GENX_(__NR_madvise, sys_madvise), // 219
+
+ GENXY(__NR_getdents64, sys_getdents64), // 220
+ GENX_(221, sys_ni_syscall), /* unimplemented (by the kernel) */ // 221
+ LINX_(__NR_readahead, sys_readahead), // 222
+ GENX_(223, sys_ni_syscall), /* unimplemented (by the kernel) */ // 223
+ LINX_(__NR_setxattr, sys_setxattr), // 224
+
+ LINX_(__NR_lsetxattr, sys_lsetxattr), // 225
+ LINX_(__NR_fsetxattr, sys_fsetxattr), // 226
+ LINXY(__NR_getxattr, sys_getxattr), // 227
+ LINXY(__NR_lgetxattr, sys_lgetxattr), // 228
+ LINXY(__NR_fgetxattr, sys_fgetxattr), // 229
+
+ LINXY(__NR_listxattr, sys_listxattr), // 230
+ LINXY(__NR_llistxattr, sys_llistxattr), // 231
+ LINXY(__NR_flistxattr, sys_flistxattr), // 232
+ LINX_(__NR_removexattr, sys_removexattr), // 233
+ LINX_(__NR_lremovexattr, sys_lremovexattr), // 234
+
+ LINX_(__NR_fremovexattr, sys_fremovexattr), // 235
+ LINX_(__NR_gettid, sys_gettid), // 236
+   LINXY(__NR_tkill, sys_tkill),                        // 237
+ LINXY(__NR_futex, sys_futex), // 238
+ LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 239
+
+ LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 240
+ LINXY(__NR_tgkill, sys_tgkill), // 241
+ GENX_(242, sys_ni_syscall), /* unimplemented (by the kernel) */ // 242
+ LINXY(__NR_io_setup, sys_io_setup), // 243
+ LINX_(__NR_io_destroy, sys_io_destroy), // 244
+
+ LINXY(__NR_io_getevents, sys_io_getevents), // 245
+ LINX_(__NR_io_submit, sys_io_submit), // 246
+ LINXY(__NR_io_cancel, sys_io_cancel), // 247
+ LINX_(__NR_exit_group, sys_exit_group), // 248
+ LINXY(__NR_epoll_create, sys_epoll_create), // 249
+
+ LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 250
+ LINXY(__NR_epoll_wait, sys_epoll_wait), // 251
+ LINX_(__NR_set_tid_address, sys_set_tid_address), // 252
+ PLAX_(__NR_fadvise64, sys_fadvise64), // 253
+ LINXY(__NR_timer_create, sys_timer_create), // 254
+
+ LINXY(__NR_timer_settime, sys_timer_settime), // 255
+ LINXY(__NR_timer_gettime, sys_timer_gettime), // 256
+ LINX_(__NR_timer_getoverrun, sys_timer_getoverrun), // 257
+ LINX_(__NR_timer_delete, sys_timer_delete), // 258
+ LINX_(__NR_clock_settime, sys_clock_settime), // 259
+
+ LINXY(__NR_clock_gettime, sys_clock_gettime), // 260
+ LINXY(__NR_clock_getres, sys_clock_getres), // 261
+ LINXY(__NR_clock_nanosleep, sys_clock_nanosleep), // 262
+ GENX_(263, sys_ni_syscall), /* unimplemented (by the kernel) */ // 263
+ GENX_(264, sys_ni_syscall), /* unimplemented (by the kernel) */ // 264
+
+ GENXY(__NR_statfs64, sys_statfs64), // 265
+ GENXY(__NR_fstatfs64, sys_fstatfs64), // 266
+// ?????(__NR_remap_file_pages, ),
+ GENX_(268, sys_ni_syscall), /* unimplemented (by the kernel) */ // 268
+ GENX_(269, sys_ni_syscall), /* unimplemented (by the kernel) */ // 269
+
+ GENX_(270, sys_ni_syscall), /* unimplemented (by the kernel) */ // 270
+ LINXY(__NR_mq_open, sys_mq_open), // 271
+ LINX_(__NR_mq_unlink, sys_mq_unlink), // 272
+ LINX_(__NR_mq_timedsend, sys_mq_timedsend), // 273
+ LINXY(__NR_mq_timedreceive, sys_mq_timedreceive), // 274
+
+ LINX_(__NR_mq_notify, sys_mq_notify), // 275
+ LINXY(__NR_mq_getsetattr, sys_mq_getsetattr), // 276
+// ?????(__NR_kexec_load, ),
+ LINX_(__NR_add_key, sys_add_key), // 278
+ LINX_(__NR_request_key, sys_request_key), // 279
+
+ LINXY(__NR_keyctl, sys_keyctl), // 280
+ LINXY(__NR_waitid, sys_waitid), // 281
+ LINX_(__NR_ioprio_set, sys_ioprio_set), // 282
+ LINX_(__NR_ioprio_get, sys_ioprio_get), // 283
+ LINX_(__NR_inotify_init, sys_inotify_init), // 284
+
+ LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 285
+ LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 286
+ GENX_(287, sys_ni_syscall), /* unimplemented (by the kernel) */ // 287
+ LINXY(__NR_openat, sys_openat), // 288
+ LINX_(__NR_mkdirat, sys_mkdirat), // 289
+
+ LINX_(__NR_mknodat, sys_mknodat), // 290
+ LINX_(__NR_fchownat, sys_fchownat), // 291
+ LINX_(__NR_futimesat, sys_futimesat), // 292
+ LINXY(__NR_newfstatat, sys_newfstatat), // 293
+ LINX_(__NR_unlinkat, sys_unlinkat), // 294
+
+ LINX_(__NR_renameat, sys_renameat), // 295
+ LINX_(__NR_linkat, sys_linkat), // 296
+ LINX_(__NR_symlinkat, sys_symlinkat), // 297
+ LINX_(__NR_readlinkat, sys_readlinkat), // 298
+ LINX_(__NR_fchmodat, sys_fchmodat), // 299
+
+ LINX_(__NR_faccessat, sys_faccessat), // 300
+ LINX_(__NR_pselect6, sys_pselect6), // 301
+ LINXY(__NR_ppoll, sys_ppoll), // 302
+// ?????(__NR_unshare, ),
+ LINX_(__NR_set_robust_list, sys_set_robust_list), // 304
+
+ LINXY(__NR_get_robust_list, sys_get_robust_list), // 305
+// ?????(__NR_splice, ),
+ LINX_(__NR_sync_file_range, sys_sync_file_range), // 307
+// ?????(__NR_tee, ),
+// ?????(__NR_vmsplice, ),
+
+ GENX_(310, sys_ni_syscall), /* unimplemented (by the kernel) */ // 310
+// ?????(__NR_getcpu, ),
+ LINXY(__NR_epoll_pwait, sys_epoll_pwait), // 312
+ GENX_(__NR_utimes, sys_utimes), // 313
+ LINX_(__NR_fallocate, sys_fallocate), // 314
+
+ LINX_(__NR_utimensat, sys_utimensat), // 315
+ LINXY(__NR_signalfd, sys_signalfd), // 316
+ GENX_(317, sys_ni_syscall), /* unimplemented (by the kernel) */ // 317
+ LINX_(__NR_eventfd, sys_eventfd), // 318
+ LINXY(__NR_timerfd_create, sys_timerfd_create), // 319
+
+ LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 320
+ LINXY(__NR_timerfd_gettime, sys_timerfd_gettime), // 321
+ LINXY(__NR_signalfd4, sys_signalfd4), // 322
+ LINX_(__NR_eventfd2, sys_eventfd2), // 323
+ LINXY(__NR_inotify_init1, sys_inotify_init1), // 324
+
+ LINXY(__NR_pipe2, sys_pipe2), // 325
+ // (__NR_dup3, ),
+ LINXY(__NR_epoll_create1, sys_epoll_create1), // 327
+ LINXY(__NR_preadv, sys_preadv), // 328
+ LINX_(__NR_pwritev, sys_pwritev), // 329
+
+// ?????(__NR_rt_tgsigqueueinfo, ),
+ LINXY(__NR_perf_event_open, sys_perf_counter_open), // 331
+};
+
+SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
+{
+ const UInt syscall_table_size
+ = sizeof(syscall_table) / sizeof(syscall_table[0]);
+
+ /* Is it in the contiguous initial section of the table? */
+ if (sysno < syscall_table_size) {
+ SyscallTableEntry* sys = &syscall_table[sysno];
+ if (sys->before == NULL)
+ return NULL; /* no entry */
+ else
+ return sys;
+ }
+
+ /* Can't find a wrapper */
+ return NULL;
+}
+
+#endif
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/include/vki/vki-posixtypes-s390x-linux.h
+++ valgrind/include/vki/vki-posixtypes-s390x-linux.h
@@ -0,0 +1,77 @@
+
+/*--------------------------------------------------------------------*/
+/*--- s390x/Linux-specific kernel interface: posix types. ---*/
+/*--- vki-posixtypes-s390x-linux.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm and Volker Sameske */
+
+#ifndef __VKI_POSIXTYPES_S390X_LINUX_H
+#define __VKI_POSIXTYPES_S390X_LINUX_H
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/posix_types.h
+//----------------------------------------------------------------------
+
+typedef long __vki_kernel_off_t;
+typedef int __vki_kernel_pid_t;
+typedef unsigned long __vki_kernel_size_t;
+typedef long __vki_kernel_time_t;
+typedef long __vki_kernel_suseconds_t;
+typedef long __vki_kernel_clock_t;
+typedef int __vki_kernel_timer_t;
+typedef int __vki_kernel_clockid_t;
+typedef int __vki_kernel_daddr_t;
+typedef char * __vki_kernel_caddr_t;
+typedef unsigned short __vki_kernel_uid16_t;
+typedef unsigned short __vki_kernel_gid16_t;
+typedef long long __vki_kernel_loff_t;
+
+typedef unsigned int __vki_kernel_ino_t;
+typedef unsigned int __vki_kernel_mode_t;
+typedef unsigned int __vki_kernel_nlink_t;
+typedef int __vki_kernel_ipc_pid_t;
+typedef unsigned int __vki_kernel_uid_t;
+typedef unsigned int __vki_kernel_gid_t;
+typedef long __vki_kernel_ssize_t;
+typedef long __vki_kernel_ptrdiff_t;
+typedef unsigned long __vki_kernel_sigset_t; /* at least 32 bits */
+typedef __vki_kernel_uid_t __vki_kernel_old_uid_t;
+typedef __vki_kernel_gid_t __vki_kernel_old_gid_t;
+typedef __vki_kernel_uid_t __vki_kernel_uid32_t;
+typedef __vki_kernel_gid_t __vki_kernel_gid32_t;
+typedef unsigned short __vki_kernel_old_dev_t;
+
+typedef struct {
+ int val[2];
+} __vki_kernel_fsid_t;
+
+#endif // __VKI_POSIXTYPES_S390X_LINUX_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/include/vki/vki-s390x-linux.h
+++ valgrind/include/vki/vki-s390x-linux.h
@@ -0,0 +1,941 @@
+
+/*--------------------------------------------------------------------*/
+/*--- s390x/Linux-specific kernel interface. vki-s390x-linux.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm and Volker Sameske */
+
+#ifndef __VKI_S390X_LINUX_H
+#define __VKI_S390X_LINUX_H
+
+#define __force
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/types.h
+//----------------------------------------------------------------------
+
+typedef __signed__ char __vki_s8;
+typedef unsigned char __vki_u8;
+
+typedef __signed__ short __vki_s16;
+typedef unsigned short __vki_u16;
+
+typedef __signed__ int __vki_s32;
+typedef unsigned int __vki_u32;
+
+typedef __signed__ long __vki_s64;
+typedef unsigned long __vki_u64;
+
+typedef unsigned short vki_u16;
+
+typedef unsigned int vki_u32;
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/page.h
+//----------------------------------------------------------------------
+
+/* PAGE_SHIFT determines the page size */
+#define VKI_PAGE_SHIFT 12
+#define VKI_PAGE_SIZE (1UL << VKI_PAGE_SHIFT)
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/siginfo.h
+//----------------------------------------------------------------------
+
+/* We need that to ensure that sizeof(siginfo) == 128. */
+#ifdef __s390x__
+#define __VKI_ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#endif
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/sigcontext.h
+//----------------------------------------------------------------------
+
+#define __VKI_NUM_GPRS 16
+#define __VKI_NUM_FPRS 16
+#define __VKI_NUM_ACRS 16
+
+#ifndef VGA_s390x
+
+/* Has to be at least _NSIG_WORDS from asm/signal.h */
+#define _VKI_SIGCONTEXT_NSIG 64
+#define _VKI_SIGCONTEXT_NSIG_BPW 32
+/* Size of stack frame allocated when calling signal handler. */
+#define __VKI_SIGNAL_FRAMESIZE 96
+
+#else /* VGA_s390x */
+
+/* Has to be at least _NSIG_WORDS from asm/signal.h */
+#define _VKI_SIGCONTEXT_NSIG 64
+#define _VKI_SIGCONTEXT_NSIG_BPW 64
+/* Size of stack frame allocated when calling signal handler. */
+#define __VKI_SIGNAL_FRAMESIZE 160
+
+#endif /* VGA_s390x */
+
+
+#define _VKI_SIGCONTEXT_NSIG_WORDS (_VKI_SIGCONTEXT_NSIG / _VKI_SIGCONTEXT_NSIG_BPW)
+#define _VKI_SIGMASK_COPY_SIZE (sizeof(unsigned long)*_VKI_SIGCONTEXT_NSIG_WORDS)
+
+typedef struct
+{
+ unsigned long mask;
+ unsigned long addr;
+} __attribute__ ((aligned(8))) _vki_psw_t;
+
+typedef struct
+{
+ _vki_psw_t psw;
+ unsigned long gprs[__VKI_NUM_GPRS];
+ unsigned int acrs[__VKI_NUM_ACRS];
+} _vki_s390_regs_common;
+
+typedef struct
+{
+ unsigned int fpc;
+ double fprs[__VKI_NUM_FPRS];
+} _vki_s390_fp_regs;
+
+typedef struct
+{
+ _vki_s390_regs_common regs;
+ _vki_s390_fp_regs fpregs;
+} _vki_sigregs;
+
+
+struct vki_sigcontext
+{
+ unsigned long oldmask[_VKI_SIGCONTEXT_NSIG_WORDS];
+ _vki_sigregs __user *sregs;
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/signal.h
+//----------------------------------------------------------------------
+
+#define _VKI_NSIG _VKI_SIGCONTEXT_NSIG
+#define _VKI_NSIG_BPW _VKI_SIGCONTEXT_NSIG_BPW
+#define _VKI_NSIG_WORDS _VKI_SIGCONTEXT_NSIG_WORDS
+
+typedef unsigned long vki_old_sigset_t;
+
+typedef struct {
+ unsigned long sig[_VKI_NSIG_WORDS];
+} vki_sigset_t;
+
+#define VKI_SIGHUP 1
+#define VKI_SIGINT 2
+#define VKI_SIGQUIT 3
+#define VKI_SIGILL 4
+#define VKI_SIGTRAP 5
+#define VKI_SIGABRT 6
+#define VKI_SIGIOT 6
+#define VKI_SIGBUS 7
+#define VKI_SIGFPE 8
+#define VKI_SIGKILL 9
+#define VKI_SIGUSR1 10
+#define VKI_SIGSEGV 11
+#define VKI_SIGUSR2 12
+#define VKI_SIGPIPE 13
+#define VKI_SIGALRM 14
+#define VKI_SIGTERM 15
+#define VKI_SIGSTKFLT 16
+#define VKI_SIGCHLD 17
+#define VKI_SIGCONT 18
+#define VKI_SIGSTOP 19
+#define VKI_SIGTSTP 20
+#define VKI_SIGTTIN 21
+#define VKI_SIGTTOU 22
+#define VKI_SIGURG 23
+#define VKI_SIGXCPU 24
+#define VKI_SIGXFSZ 25
+#define VKI_SIGVTALRM 26
+#define VKI_SIGPROF 27
+#define VKI_SIGWINCH 28
+#define VKI_SIGIO 29
+#define VKI_SIGPOLL VKI_SIGIO
+/*
+#define VKI_SIGLOST 29
+*/
+#define VKI_SIGPWR 30
+#define VKI_SIGSYS 31
+#define VKI_SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define VKI_SIGRTMIN 32
+#define VKI_SIGRTMAX _VKI_NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define VKI_SA_NOCLDSTOP 0x00000001
+#define VKI_SA_NOCLDWAIT 0x00000002
+#define VKI_SA_SIGINFO 0x00000004
+#define VKI_SA_ONSTACK 0x08000000
+#define VKI_SA_RESTART 0x10000000
+#define VKI_SA_NODEFER 0x40000000
+#define VKI_SA_RESETHAND 0x80000000
+
+#define VKI_SA_NOMASK VKI_SA_NODEFER
+#define VKI_SA_ONESHOT VKI_SA_RESETHAND
+#define VKI_SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+
+#define VKI_SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define VKI_SS_ONSTACK 1
+#define VKI_SS_DISABLE 2
+
+#define VKI_MINSIGSTKSZ 2048
+#define VKI_SIGSTKSZ 8192
+
+
+/* Next lines asm-generic/signal.h */
+#define VKI_SIG_BLOCK 0 /* for blocking signals */
+#define VKI_SIG_UNBLOCK 1 /* for unblocking signals */
+#define VKI_SIG_SETMASK 2 /* for setting the signal mask */
+
+typedef void __vki_signalfn_t(int);
+typedef __vki_signalfn_t __user *__vki_sighandler_t;
+
+/* default signal handling */
+#define VKI_SIG_DFL ((__force __vki_sighandler_t)0)
+/* ignore signal */
+#define VKI_SIG_IGN ((__force __vki_sighandler_t)1)
+/* error return from signal */
+#define VKI_SIG_ERR ((__force __vki_sighandler_t)-1)
+/* Back to asm-s390/signal.h */
+
+struct vki_old_sigaction {
+ // [[Nb: a 'k' prefix is added to "sa_handler" because
+ // bits/sigaction.h (which gets dragged in somehow via signal.h)
+ // #defines it as something else. Since that is done for glibc's
+ // purposes, which we don't care about here, we use our own name.]]
+ __vki_sighandler_t ksa_handler;
+ vki_old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+struct vki_sigaction {
+ // [[See comment about extra 'k' above]]
+ __vki_sighandler_t ksa_handler;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ vki_sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct vki_k_sigaction {
+ struct vki_sigaction sa;
+};
+
+
+/* On Linux we use the same type for passing sigactions to
+ and from the kernel. Hence: */
+typedef struct vki_sigaction vki_sigaction_toK_t;
+typedef struct vki_sigaction vki_sigaction_fromK_t;
+
+
+typedef struct vki_sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ vki_size_t ss_size;
+} vki_stack_t;
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/mman.h
+//----------------------------------------------------------------------
+
+#define VKI_PROT_NONE 0x0 /* No page permissions */
+#define VKI_PROT_READ 0x1 /* page can be read */
+#define VKI_PROT_WRITE 0x2 /* page can be written */
+#define VKI_PROT_EXEC 0x4 /* page can be executed */
+#define VKI_PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend
+ change to start of
+ growsdown vma */
+#define VKI_PROT_GROWSUP 0x02000000 /* mprotect flag:
+ extend change to end
+ of growsup vma */
+
+#define VKI_MAP_PRIVATE 0x0002 /* */
+#define VKI_MAP_FIXED 0x0010 /* */
+#define VKI_MAP_ANONYMOUS 0x0020 /* */
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/fcntl.h
+//----------------------------------------------------------------------
+
+#define VKI_O_RDONLY 00000000
+#define VKI_O_WRONLY 00000001
+#define VKI_O_RDWR 00000002
+#define VKI_O_ACCMODE 00000003
+#define VKI_O_CREAT 00000100 /* not fcntl */
+#define VKI_O_EXCL 00000200 /* not fcntl */
+#define VKI_O_NOCTTY 00000400 /* not fcntl */
+#define VKI_O_TRUNC 00001000 /* not fcntl */
+#define VKI_O_APPEND 00002000
+
+#define VKI_AT_FDCWD -100
+
+#define VKI_F_DUPFD 0 /* dup */
+#define VKI_F_GETFD 1 /* get close_on_exec */
+#define VKI_F_SETFD 2 /* set/clear close_on_exec */
+#define VKI_F_GETFL 3 /* get file->f_flags */
+#define VKI_F_SETFL 4 /* set file->f_flags */
+#define VKI_F_GETLK 5
+#define VKI_F_SETLK 6
+#define VKI_F_SETLKW 7
+#define VKI_F_SETOWN 8 /* for sockets. */
+#define VKI_F_GETOWN 9 /* for sockets. */
+#define VKI_F_SETSIG 10 /* for sockets. */
+#define VKI_F_GETSIG 11 /* for sockets. */
+
+#define VKI_FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+#define VKI_F_LINUX_SPECIFIC_BASE 1024
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/resource.h
+//----------------------------------------------------------------------
+
+// which just does #include <asm-generic/resource.h>
+
+#define VKI_RLIMIT_DATA 2 /* max data size */
+#define VKI_RLIMIT_STACK 3 /* max stack size */
+#define VKI_RLIMIT_CORE 4 /* max core file size */
+#define VKI_RLIMIT_NOFILE 7 /* max number of open files */
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/socket.h
+//----------------------------------------------------------------------
+
+#define VKI_SOL_SOCKET 1
+
+#define VKI_SO_TYPE 3
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/sockios.h
+//----------------------------------------------------------------------
+
+#define VKI_SIOCSPGRP 0x8902
+#define VKI_SIOCGPGRP 0x8904
+#define VKI_SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+/* since 2.6.22 */
+#define VKI_SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/stat.h
+//----------------------------------------------------------------------
+
+#ifndef VGA_s390x
+struct vki_stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct vki_stat64 {
+ unsigned long long st_dev;
+ unsigned int __pad1;
+ unsigned long __st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned long st_uid;
+ unsigned long st_gid;
+ unsigned long long st_rdev;
+ unsigned int __pad3;
+ long long st_size;
+ unsigned long st_blksize;
+ unsigned char __pad4[4];
+ unsigned long __pad5; /* future possible st_blocks high bits */
+ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
+ unsigned long long st_ino;
+};
+
+#else
+
+struct vki_stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned long st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad1;
+ unsigned long st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long st_blksize;
+ long st_blocks;
+ unsigned long __unused[3];
+};
+
+#endif /* VGA_s390x */
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/statfs.h
+//----------------------------------------------------------------------
+
+struct vki_statfs {
+ int f_type;
+ int f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __vki_kernel_fsid_t f_fsid;
+ int f_namelen;
+ int f_frsize;
+ int f_spare[5];
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/termios.h
+//----------------------------------------------------------------------
+
+struct vki_winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define VKI_NCC 8
+struct vki_termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[VKI_NCC]; /* control characters */
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/termbits.h
+//----------------------------------------------------------------------
+
+typedef unsigned char vki_cc_t;
+typedef unsigned int vki_tcflag_t;
+
+#define VKI_NCCS 19
+struct vki_termios {
+ vki_tcflag_t c_iflag; /* input mode flags */
+ vki_tcflag_t c_oflag; /* output mode flags */
+ vki_tcflag_t c_cflag; /* control mode flags */
+ vki_tcflag_t c_lflag; /* local mode flags */
+ vki_cc_t c_line; /* line discipline */
+ vki_cc_t c_cc[VKI_NCCS]; /* control characters */
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/ioctl.h
+//----------------------------------------------------------------------
+
+#define _VKI_IOC_NRBITS 8
+#define _VKI_IOC_TYPEBITS 8
+#define _VKI_IOC_SIZEBITS 14
+#define _VKI_IOC_DIRBITS 2
+
+#define _VKI_IOC_NRMASK ((1 << _VKI_IOC_NRBITS)-1)
+#define _VKI_IOC_TYPEMASK ((1 << _VKI_IOC_TYPEBITS)-1)
+#define _VKI_IOC_SIZEMASK ((1 << _VKI_IOC_SIZEBITS)-1)
+#define _VKI_IOC_DIRMASK ((1 << _VKI_IOC_DIRBITS)-1)
+
+#define _VKI_IOC_NRSHIFT 0
+#define _VKI_IOC_TYPESHIFT (_VKI_IOC_NRSHIFT+_VKI_IOC_NRBITS)
+#define _VKI_IOC_SIZESHIFT (_VKI_IOC_TYPESHIFT+_VKI_IOC_TYPEBITS)
+#define _VKI_IOC_DIRSHIFT (_VKI_IOC_SIZESHIFT+_VKI_IOC_SIZEBITS)
+
+#define _VKI_IOC_NONE 0U
+#define _VKI_IOC_WRITE 1U
+#define _VKI_IOC_READ 2U
+
+#define _VKI_IOC(dir,type,nr,size) \
+ (((dir) << _VKI_IOC_DIRSHIFT) | \
+ ((type) << _VKI_IOC_TYPESHIFT) | \
+ ((nr) << _VKI_IOC_NRSHIFT) | \
+ ((size) << _VKI_IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _VKI_IO(type,nr) _VKI_IOC(_VKI_IOC_NONE,(type),(nr),0)
+#define _VKI_IOR(type,nr,size) _VKI_IOC(_VKI_IOC_READ,(type),(nr),(_VKI_IOC_TYPECHECK(size)))
+#define _VKI_IOW(type,nr,size) _VKI_IOC(_VKI_IOC_WRITE,(type),(nr),(_VKI_IOC_TYPECHECK(size)))
+#define _VKI_IOWR(type,nr,size) _VKI_IOC(_VKI_IOC_READ|_VKI_IOC_WRITE,(type),(nr),(_VKI_IOC_TYPECHECK(size)))
+
+/* used to decode ioctl numbers.. */
+#define _VKI_IOC_DIR(nr) (((nr) >> _VKI_IOC_DIRSHIFT) & _VKI_IOC_DIRMASK)
+#define _VKI_IOC_TYPE(nr) (((nr) >> _VKI_IOC_TYPESHIFT) & _VKI_IOC_TYPEMASK)
+#define _VKI_IOC_NR(nr) (((nr) >> _VKI_IOC_NRSHIFT) & _VKI_IOC_NRMASK)
+#define _VKI_IOC_SIZE(nr) (((nr) >> _VKI_IOC_SIZESHIFT) & _VKI_IOC_SIZEMASK)
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/ioctls.h
+//----------------------------------------------------------------------
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define VKI_TCGETS 0x5401
+#define VKI_TCSETS 0x5402
+#define VKI_TCSETSW 0x5403
+#define VKI_TCSETSF 0x5404
+#define VKI_TCGETA 0x5405
+#define VKI_TCSETA 0x5406
+#define VKI_TCSETAW 0x5407
+#define VKI_TCSETAF 0x5408
+#define VKI_TCSBRK 0x5409
+#define VKI_TCXONC 0x540A
+#define VKI_TCFLSH 0x540B
+
+#define VKI_TIOCSCTTY 0x540E
+#define VKI_TIOCGPGRP 0x540F
+#define VKI_TIOCSPGRP 0x5410
+#define VKI_TIOCOUTQ 0x5411
+
+#define VKI_TIOCGWINSZ 0x5413
+#define VKI_TIOCSWINSZ 0x5414
+#define VKI_TIOCMGET 0x5415
+#define VKI_TIOCMBIS 0x5416
+#define VKI_TIOCMBIC 0x5417
+#define VKI_TIOCMSET 0x5418
+
+#define VKI_FIONREAD 0x541B
+#define VKI_TIOCLINUX 0x541C
+
+#define VKI_FIONBIO 0x5421
+
+#define VKI_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+
+#define VKI_TIOCGPTN _VKI_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define VKI_TIOCSPTLCK _VKI_IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define VKI_FIOASYNC 0x5452
+
+#define VKI_TIOCSERGETLSR 0x5459 /* Get line status register */
+
+#define VKI_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/poll.h
+//----------------------------------------------------------------------
+
+struct vki_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/ptrace.h
+//----------------------------------------------------------------------
+#define VKI_NUM_GPRS 16
+#define VKI_NUM_FPRS 16
+#define VKI_NUM_CRS 16
+#define VKI_NUM_ACRS 16
+
+typedef union
+{
+ float f;
+ double d;
+ __vki_u64 ui;
+ struct
+ {
+ __vki_u32 hi;
+ __vki_u32 lo;
+ } fp;
+} vki_freg_t;
+
+typedef struct
+{
+ __vki_u32 fpc;
+ vki_freg_t fprs[VKI_NUM_FPRS];
+} vki_s390_fp_regs;
+
+typedef struct
+{
+ unsigned long mask;
+ unsigned long addr;
+} __attribute__ ((aligned(8))) vki_psw_t;
+
+typedef struct
+{
+ vki_psw_t psw;
+ unsigned long gprs[VKI_NUM_GPRS];
+ unsigned int acrs[VKI_NUM_ACRS];
+ unsigned long orig_gpr2;
+} vki_s390_regs;
+
+/*
+ * Now for the program event recording (trace) definitions.
+ */
+typedef struct
+{
+ unsigned long cr[3];
+} vki_per_cr_words;
+
+typedef struct
+{
+#ifdef VGA_s390x
+ unsigned : 32;
+#endif /* VGA_s390x */
+ unsigned em_branching : 1;
+ unsigned em_instruction_fetch : 1;
+ /*
+ * Switching on storage alteration automatically fixes
+ * the storage alteration event bit in the users std.
+ */
+ unsigned em_storage_alteration : 1;
+ unsigned em_gpr_alt_unused : 1;
+ unsigned em_store_real_address : 1;
+ unsigned : 3;
+ unsigned branch_addr_ctl : 1;
+ unsigned : 1;
+ unsigned storage_alt_space_ctl : 1;
+ unsigned : 21;
+ unsigned long starting_addr;
+ unsigned long ending_addr;
+} vki_per_cr_bits;
+
+typedef struct
+{
+ unsigned short perc_atmid;
+ unsigned long address;
+ unsigned char access_id;
+} vki_per_lowcore_words;
+
+typedef struct
+{
+ unsigned perc_branching : 1;
+ unsigned perc_instruction_fetch : 1;
+ unsigned perc_storage_alteration : 1;
+ unsigned perc_gpr_alt_unused : 1;
+ unsigned perc_store_real_address : 1;
+ unsigned : 3;
+ unsigned atmid_psw_bit_31 : 1;
+ unsigned atmid_validity_bit : 1;
+ unsigned atmid_psw_bit_32 : 1;
+ unsigned atmid_psw_bit_5 : 1;
+ unsigned atmid_psw_bit_16 : 1;
+ unsigned atmid_psw_bit_17 : 1;
+ unsigned si : 2;
+ unsigned long address;
+ unsigned : 4;
+ unsigned access_id : 4;
+} vki_per_lowcore_bits;
+
+typedef struct
+{
+ union {
+ vki_per_cr_words words;
+ vki_per_cr_bits bits;
+ } control_regs;
+ /*
+ * Use these flags instead of setting em_instruction_fetch
+ * directly they are used so that single stepping can be
+ * switched on & off while not affecting other tracing
+ */
+ unsigned single_step : 1;
+ unsigned instruction_fetch : 1;
+ unsigned : 30;
+ /*
+ * These addresses are copied into cr10 & cr11 if single
+ * stepping is switched off
+ */
+ unsigned long starting_addr;
+ unsigned long ending_addr;
+ union {
+ vki_per_lowcore_words words;
+ vki_per_lowcore_bits bits;
+ } lowcore;
+} vki_per_struct;
+
+/*
+ * The user_regs_struct defines the way the user registers are
+ * store on the stack for signal handling.
+ */
+struct vki_user_regs_struct
+{
+ vki_psw_t psw;
+ unsigned long gprs[VKI_NUM_GPRS];
+ unsigned int acrs[VKI_NUM_ACRS];
+ unsigned long orig_gpr2;
+ vki_s390_fp_regs fp_regs;
+ /*
+ * These per registers are in here so that gdb can modify them
+ * itself as there is no "official" ptrace interface for hardware
+ * watchpoints. This is the way intel does it.
+ */
+ vki_per_struct per_info;
+ unsigned long ieee_instruction_pointer;
+ /* Used to give failing instruction back to user for ieee exceptions */
+};
+
+typedef struct
+{
+ unsigned int vki_len;
+ unsigned long vki_kernel_addr;
+ unsigned long vki_process_addr;
+} vki_ptrace_area;
+
+/*
+ * S/390 specific non posix ptrace requests
+ */
+#define VKI_PTRACE_PEEKUSR_AREA 0x5000
+#define VKI_PTRACE_POKEUSR_AREA 0x5001
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/elf.h
+//----------------------------------------------------------------------
+
+typedef vki_s390_fp_regs vki_elf_fpregset_t;
+typedef vki_s390_regs vki_elf_gregset_t;
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/ucontext.h
+//----------------------------------------------------------------------
+
+struct vki_ucontext {
+ unsigned long uc_flags;
+ struct vki_ucontext *uc_link;
+ vki_stack_t uc_stack;
+ _vki_sigregs uc_mcontext;
+ vki_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/ipcbuf.h
+//----------------------------------------------------------------------
+
+struct vki_ipc64_perm
+{
+ __vki_kernel_key_t key;
+ __vki_kernel_uid32_t uid;
+ __vki_kernel_gid32_t gid;
+ __vki_kernel_uid32_t cuid;
+ __vki_kernel_gid32_t cgid;
+ __vki_kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+#ifndef VGA_s390x
+ unsigned short __pad2;
+#endif /* ! VGA_s390x */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/sembuf.h
+//----------------------------------------------------------------------
+
+struct vki_semid64_ds {
+ struct vki_ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __vki_kernel_time_t sem_otime; /* last semop time */
+#ifndef VGA_s390x
+ unsigned long __unused1;
+#endif /* ! VGA_s390x */
+ __vki_kernel_time_t sem_ctime; /* last change time */
+#ifndef VGA_s390x
+ unsigned long __unused2;
+#endif /* ! VGA_s390x */
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/msgbuf.h
+//----------------------------------------------------------------------
+
+struct vki_msqid64_ds {
+ struct vki_ipc64_perm msg_perm;
+ __vki_kernel_time_t msg_stime; /* last msgsnd time */
+#ifndef VGA_s390x
+ unsigned long __unused1;
+#endif /* ! VGA_s390x */
+ __vki_kernel_time_t msg_rtime; /* last msgrcv time */
+#ifndef VGA_s390x
+ unsigned long __unused2;
+#endif /* ! VGA_s390x */
+ __vki_kernel_time_t msg_ctime; /* last change time */
+#ifndef VGA_s390x
+ unsigned long __unused3;
+#endif /* ! VGA_s390x */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __vki_kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __vki_kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/ipc.h
+//----------------------------------------------------------------------
+
+struct vki_ipc_kludge {
+ struct vki_msgbuf __user *msgp;
+ long msgtyp;
+};
+
+#define VKI_SEMOP 1
+#define VKI_SEMGET 2
+#define VKI_SEMCTL 3
+#define VKI_SEMTIMEDOP 4
+#define VKI_MSGSND 11
+#define VKI_MSGRCV 12
+#define VKI_MSGGET 13
+#define VKI_MSGCTL 14
+#define VKI_SHMAT 21
+#define VKI_SHMDT 22
+#define VKI_SHMGET 23
+#define VKI_SHMCTL 24
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/shmbuf.h
+//----------------------------------------------------------------------
+
+struct vki_shmid64_ds {
+ struct vki_ipc64_perm shm_perm; /* operation perms */
+ vki_size_t shm_segsz; /* size of segment (bytes) */
+ __vki_kernel_time_t shm_atime; /* last attach time */
+#ifndef VGA_s390x
+ unsigned long __unused1;
+#endif /* ! VGA_s390x */
+ __vki_kernel_time_t shm_dtime; /* last detach time */
+#ifndef VGA_s390x
+ unsigned long __unused2;
+#endif /* ! VGA_s390x */
+ __vki_kernel_time_t shm_ctime; /* last change time */
+#ifndef VGA_s390x
+ unsigned long __unused3;
+#endif /* ! VGA_s390x */
+ __vki_kernel_pid_t shm_cpid; /* pid of creator */
+ __vki_kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct vki_shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+
+//----------------------------------------------------------------------
+// The following are defined in the VKI namespace but are nowhere found
+// in the linux headers.
+//----------------------------------------------------------------------
+#define VKI_BIG_ENDIAN 1
+#define VKI_MAX_PAGE_SHIFT VKI_PAGE_SHIFT
+#define VKI_MAX_PAGE_SIZE VKI_PAGE_SIZE
+
+//----------------------------------------------------------------------
+// From linux-2.6.35.4/arch/s390/include/asm/shmparam.h
+//----------------------------------------------------------------------
+
+#define VKI_SHMLBA VKI_PAGE_SIZE
+
+/* If a system call returns a value >= VKI_MAX_ERRNO then that is considered
+ an error condition. I.e. the system call failed. */
+#define VKI_MAX_ERRNO -125
+
+#endif // __VKI_S390X_LINUX_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/include/vki/vki-scnums-s390x-linux.h
+++ valgrind/include/vki/vki-scnums-s390x-linux.h
@@ -0,0 +1,447 @@
+
+/*--------------------------------------------------------------------*/
+/*--- System call numbers for s390x-linux. ---*/
+/*--- vki-scnums-s390x-linux.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright IBM Corp. 2010
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm and Christian Borntraeger. */
+
+#ifndef __VKI_SCNUMS_S390X_LINUX_H
+#define __VKI_SCNUMS_S390X_LINUX_H
+
+//----------------------------------------------------------------------
+// From linux-2.6.16.60/include/asm-s390/unistd.h
+//----------------------------------------------------------------------
+
+
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_restart_syscall 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_setpgid 57
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_symlink 83
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_lookup_dcookie 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188
+#define __NR_putpmsg 189
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+#define __NR_getdents64 220
+#define __NR_fcntl64 221
+#define __NR_readahead 222
+#define __NR_sendfile64 223
+#define __NR_setxattr 224
+#define __NR_lsetxattr 225
+#define __NR_fsetxattr 226
+#define __NR_getxattr 227
+#define __NR_lgetxattr 228
+#define __NR_fgetxattr 229
+#define __NR_listxattr 230
+#define __NR_llistxattr 231
+#define __NR_flistxattr 232
+#define __NR_removexattr 233
+#define __NR_lremovexattr 234
+#define __NR_fremovexattr 235
+#define __NR_gettid 236
+#define __NR_tkill 237
+#define __NR_futex 238
+#define __NR_sched_setaffinity 239
+#define __NR_sched_getaffinity 240
+#define __NR_tgkill 241
+/* Number 242 is reserved for tux */
+#define __NR_io_setup 243
+#define __NR_io_destroy 244
+#define __NR_io_getevents 245
+#define __NR_io_submit 246
+#define __NR_io_cancel 247
+#define __NR_exit_group 248
+#define __NR_epoll_create 249
+#define __NR_epoll_ctl 250
+#define __NR_epoll_wait 251
+#define __NR_set_tid_address 252
+#define __NR_fadvise64 253
+#define __NR_timer_create 254
+#define __NR_timer_settime (__NR_timer_create+1)
+#define __NR_timer_gettime (__NR_timer_create+2)
+#define __NR_timer_getoverrun (__NR_timer_create+3)
+#define __NR_timer_delete (__NR_timer_create+4)
+#define __NR_clock_settime (__NR_timer_create+5)
+#define __NR_clock_gettime (__NR_timer_create+6)
+#define __NR_clock_getres (__NR_timer_create+7)
+#define __NR_clock_nanosleep (__NR_timer_create+8)
+/* Number 263 is reserved for vserver */
+#define __NR_fadvise64_64 264
+#define __NR_statfs64 265
+#define __NR_fstatfs64 266
+#define __NR_remap_file_pages 267
+/* Number 268 is reserved for new sys_mbind */
+/* Number 269 is reserved for new sys_get_mempolicy */
+/* Number 270 is reserved for new sys_set_mempolicy */
+#define __NR_mq_open 271
+#define __NR_mq_unlink 272
+#define __NR_mq_timedsend 273
+#define __NR_mq_timedreceive 274
+#define __NR_mq_notify 275
+#define __NR_mq_getsetattr 276
+#define __NR_kexec_load 277
+#define __NR_add_key 278
+#define __NR_request_key 279
+#define __NR_keyctl 280
+#define __NR_waitid 281
+#define __NR_ioprio_set 282
+#define __NR_ioprio_get 283
+#define __NR_inotify_init 284
+#define __NR_inotify_add_watch 285
+#define __NR_inotify_rm_watch 286
+/* Number 287 is reserved for new sys_migrate_pages */
+#define __NR_openat 288
+#define __NR_mkdirat 289
+#define __NR_mknodat 290
+#define __NR_fchownat 291
+#define __NR_futimesat 292
+#define __NR_fstatat64 293
+#define __NR_unlinkat 294
+#define __NR_renameat 295
+#define __NR_linkat 296
+#define __NR_symlinkat 297
+#define __NR_readlinkat 298
+#define __NR_fchmodat 299
+#define __NR_faccessat 300
+#define __NR_pselect6 301
+#define __NR_ppoll 302
+#define __NR_unshare 303
+/* The following system calls are from the 2.6.32 unistd.h */
+#define __NR_set_robust_list 304
+#define __NR_get_robust_list 305
+#define __NR_splice 306
+#define __NR_sync_file_range 307
+#define __NR_tee 308
+#define __NR_vmsplice 309
+/* Number 310 is reserved for new sys_move_pages */
+#define __NR_getcpu 311
+#define __NR_epoll_pwait 312
+#define __NR_utimes 313
+#define __NR_fallocate 314
+#define __NR_utimensat 315
+#define __NR_signalfd 316
+#define __NR_timerfd 317
+#define __NR_eventfd 318
+#define __NR_timerfd_create 319
+#define __NR_timerfd_settime 320
+#define __NR_timerfd_gettime 321
+#define __NR_signalfd4 322
+#define __NR_eventfd2 323
+#define __NR_inotify_init1 324
+#define __NR_pipe2 325
+#define __NR_dup3 326
+#define __NR_epoll_create1 327
+#define __NR_preadv 328
+#define __NR_pwritev 329
+#define __NR_rt_tgsigqueueinfo 330
+#define __NR_perf_event_open 331
+
+#define NR_syscalls 332
+
+/*
+ * There are some system calls that are not present on 64 bit, some
+ * have a different name although they do the same (e.g. __NR_chown32
+ * is __NR_chown on 64 bit).
+ */
+#ifdef VGA_s390x
+#undef __NR_time
+#undef __NR_lchown
+#undef __NR_setuid
+#undef __NR_getuid
+#undef __NR_stime
+#undef __NR_setgid
+#undef __NR_getgid
+#undef __NR_geteuid
+#undef __NR_getegid
+#undef __NR_setreuid
+#undef __NR_setregid
+#undef __NR_getrlimit
+#undef __NR_getgroups
+#undef __NR_setgroups
+#undef __NR_fchown
+#undef __NR_ioperm
+#undef __NR_setfsuid
+#undef __NR_setfsgid
+#undef __NR__llseek
+#undef __NR__newselect
+#undef __NR_setresuid
+#undef __NR_getresuid
+#undef __NR_setresgid
+#undef __NR_getresgid
+#undef __NR_chown
+#undef __NR_ugetrlimit
+#undef __NR_mmap2
+#undef __NR_truncate64
+#undef __NR_ftruncate64
+#undef __NR_stat64
+#undef __NR_lstat64
+#undef __NR_fstat64
+#undef __NR_lchown32
+#undef __NR_getuid32
+#undef __NR_getgid32
+#undef __NR_geteuid32
+#undef __NR_getegid32
+#undef __NR_setreuid32
+#undef __NR_setregid32
+#undef __NR_getgroups32
+#undef __NR_setgroups32
+#undef __NR_fchown32
+#undef __NR_setresuid32
+#undef __NR_getresuid32
+#undef __NR_setresgid32
+#undef __NR_getresgid32
+#undef __NR_chown32
+#undef __NR_setuid32
+#undef __NR_setgid32
+#undef __NR_setfsuid32
+#undef __NR_setfsgid32
+#undef __NR_fcntl64
+#undef __NR_sendfile64
+#undef __NR_fadvise64_64
+#undef __NR_fstatat64
+
+#define __NR_select 142
+#define __NR_getrlimit 191 /* SuS compliant getrlimit */
+#define __NR_lchown 198
+#define __NR_getuid 199
+#define __NR_getgid 200
+#define __NR_geteuid 201
+#define __NR_getegid 202
+#define __NR_setreuid 203
+#define __NR_setregid 204
+#define __NR_getgroups 205
+#define __NR_setgroups 206
+#define __NR_fchown 207
+#define __NR_setresuid 208
+#define __NR_getresuid 209
+#define __NR_setresgid 210
+#define __NR_getresgid 211
+#define __NR_chown 212
+#define __NR_setuid 213
+#define __NR_setgid 214
+#define __NR_setfsuid 215
+#define __NR_setfsgid 216
+#define __NR_newfstatat 293
+
+#endif
+
+#endif /* __VKI_SCNUMS_S390X_LINUX_H */
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
--- valgrind/memcheck/tests/badjump.stderr.exp-s390x
+++ valgrind/memcheck/tests/badjump.stderr.exp-s390x
@@ -0,0 +1,25 @@
+
+Jump to the invalid address stated on the next line
+ at 0x........: ???
+ by 0x........: main (badjump.c:17)
+ Address 0x........ is not stack'd, malloc'd or (recently) free'd
+
+
+Process terminating with default action of signal 11 (SIGSEGV)
+ Access not within mapped region at address 0x........
+ at 0x........: ???
+ by 0x........: main (badjump.c:17)
+ If you believe this happened as a result of a stack
+ overflow in your program's main thread (unlikely but
+ possible), you can try to increase the size of the
+ main thread stack using the --main-stacksize= flag.
+ The main thread stack size used in this run was ....
+
+HEAP SUMMARY:
+ in use at exit: ... bytes in ... blocks
+ total heap usage: ... allocs, ... frees, ... bytes allocated
+
+For a detailed leak analysis, rerun with: --leak-check=full
+
+For counts of detected and suppressed errors, rerun with: -v
+ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 0 from 0)
--- valgrind/memcheck/tests/badjump2.stderr.exp-s390x
+++ valgrind/memcheck/tests/badjump2.stderr.exp-s390x
@@ -0,0 +1,6 @@
+Jump to the invalid address stated on the next line
+ at 0x........: ???
+ by 0x........: main (badjump2.c:46)
+ Address 0x........ is not stack'd, malloc'd or (recently) free'd
+
+Signal caught, as expected
--- valgrind/memcheck/tests/origin5-bz2.stderr.exp-glibc212-s390x
+++ valgrind/memcheck/tests/origin5-bz2.stderr.exp-glibc212-s390x
@@ -0,0 +1,133 @@
+Conditional jump or move depends on uninitialised value(s)
+ at 0x........: main (origin5-bz2.c:6481)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Conditional jump or move depends on uninitialised value(s)
+ at 0x........: handle_compress (origin5-bz2.c:4686)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: handle_compress (origin5-bz2.c:4686)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: handle_compress (origin5-bz2.c:4686)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: mainSort (origin5-bz2.c:2820)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3105)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: mainSort (origin5-bz2.c:2823)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3105)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: mainSort (origin5-bz2.c:2854)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3105)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: mainSort (origin5-bz2.c:2858)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3105)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: mainSort (origin5-bz2.c:2859)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3105)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: mainSort (origin5-bz2.c:2963)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3105)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: mainSort (origin5-bz2.c:2964)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3105)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: fallbackSort (origin5-bz2.c:2269)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3116)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Use of uninitialised value of size 8
+ at 0x........: fallbackSort (origin5-bz2.c:2275)
+ by 0x........: BZ2_blockSort (origin5-bz2.c:3116)
+ by 0x........: BZ2_compressBlock (origin5-bz2.c:4034)
+ by 0x........: handle_compress (origin5-bz2.c:4753)
+ by 0x........: BZ2_bzCompress (origin5-bz2.c:4822)
+ by 0x........: BZ2_bzBuffToBuffCompress (origin5-bz2.c:5630)
+ by 0x........: main (origin5-bz2.c:6484)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
+Conditional jump or move depends on uninitialised value(s)
+ at 0x........: main (origin5-bz2.c:6512)
+ Uninitialised value was created by a client request
+ at 0x........: main (origin5-bz2.c:6479)
+
--- valgrind/memcheck/tests/supp_unknown.stderr.exp-s390x
+++ valgrind/memcheck/tests/supp_unknown.stderr.exp-s390x
@@ -0,0 +1,10 @@
+
+Process terminating with default action of signal 11 (SIGSEGV)
+ Access not within mapped region at address 0x........
+ at 0x........: ???
+ by 0x........: main (badjump.c:17)
+ If you believe this happened as a result of a stack
+ overflow in your program's main thread (unlikely but
+ possible), you can try to increase the size of the
+ main thread stack using the --main-stacksize= flag.
+ The main thread stack size used in this run was ....
--- valgrind/none/tests/s390x/Makefile.am
+++ valgrind/none/tests/s390x/Makefile.am
@@ -0,0 +1,20 @@
+include $(top_srcdir)/Makefile.tool-tests.am
+
+dist_noinst_SCRIPTS = filter_stderr
+
+INSN_TESTS = clcle cvb cvd lpr flogr
+
+check_PROGRAMS = $(INSN_TESTS) \
+ ex_sig \
+ ex_clone
+
+EXTRA_DIST = \
+ $(addsuffix .stderr.exp,$(check_PROGRAMS)) \
+ $(addsuffix .stdout.exp,$(check_PROGRAMS)) \
+ $(addsuffix .vgtest,$(check_PROGRAMS))
+
+AM_CFLAGS += @FLAG_M64@
+AM_CXXFLAGS += @FLAG_M64@
+AM_CCASFLAGS += @FLAG_M64@
+
+ex_clone_LDFLAGS = -lpthread
--- valgrind/none/tests/s390x/clcle.c
+++ valgrind/none/tests/s390x/clcle.c
@@ -0,0 +1,71 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+char b1[23] ="0123456789abcdefghijklm";
+char b2[23] ="mlkjihgfedcba9876543210";
+char b3[23] ="mmmmmmmmmmmmmmmmmmmmmmm";
+char b4[23] ="00000000000000000000000";
+char longbuf[17000000];
+
+static int clcle(unsigned long *_a1, unsigned long *_l1, unsigned long *_a3, unsigned long *_l3, char _pad)
+{
+ register unsigned long a1 asm ("2") = *_a1;
+ register unsigned long l1 asm ("3") = *_l1;
+ register unsigned long a3 asm ("4") = *_a3;
+ register unsigned long l3 asm ("5") = *_l3;
+ register unsigned long pad asm ("6") = _pad;
+ register unsigned long cc asm ("7");
+
+ asm volatile( "0: clcle 2,4,0(6)\n\t"
+ "jo 0b\n\t"
+ "ipm %0\n\t"
+ "srl %0,28\n\t"
+ :"=d" (cc), "+d" (a1),"+d" (l1), "+d" (a3), "+d" (l3)
+ : "d" (pad)
+ : "memory", "cc");
+ *_a1 = a1;
+ *_a3 = a3;
+ *_l1 = l1;
+ *_l3 = l3;
+
+ return cc;
+}
+
+
+void testrun(void *_a1, unsigned long _l1, void *_a3, unsigned long _l3, char pad)
+{
+ unsigned long a1,a3,l1,l3;
+ int cc;
+
+ a1 = (unsigned long) _a1; l1 = _l1; a3 = (unsigned long) _a3; l3 = _l3;
+ cc = clcle(&a1, &l1, &a3, &l3, pad);
+ printf("cc: %d, l1: %lu(%lu) l3: %lu(%lu) diff1: %lu diff3: %lu\n",
+ cc, l1, _l1, l3, _l3, a1-(unsigned long) _a1, a3-(unsigned long) _a3);
+}
+
+
+void multiplex(unsigned long l1, unsigned long l3, char pad)
+{
+ testrun(b1, l1, b1, l3, pad);
+ testrun(b1, l1, b2, l3, pad);
+ testrun(b1, l1, b3, l3, pad);
+ testrun(b1, l1, b4, l3, pad);
+ testrun(b2, l1, b3, l3, pad);
+ testrun(b2, l1, b4, l3, pad);
+ testrun(b3, l1, b4, l3, pad);
+}
+
+int main()
+{
+ multiplex(0,0,9);
+ multiplex(1,0,9);
+ multiplex(0,1,9);
+ multiplex(1,1,9);
+ multiplex(5,23,9);
+ multiplex(23,5,9);
+ testrun(longbuf,10000,longbuf,100000,0);
+ testrun(longbuf,10000,longbuf,100000,128);
+ testrun(longbuf,10000,longbuf,100000,255);
+ exit(0);
+}
+
--- valgrind/none/tests/s390x/clcle.stderr.exp
+++ valgrind/none/tests/s390x/clcle.stderr.exp
@@ -0,0 +1,2 @@
+
+
--- valgrind/none/tests/s390x/clcle.stdout.exp
+++ valgrind/none/tests/s390x/clcle.stdout.exp
@@ -0,0 +1,45 @@
+cc: 0, l1: 0(0) l3: 0(0) diff1: 0 diff3: 0
+cc: 0, l1: 0(0) l3: 0(0) diff1: 0 diff3: 0
+cc: 0, l1: 0(0) l3: 0(0) diff1: 0 diff3: 0
+cc: 0, l1: 0(0) l3: 0(0) diff1: 0 diff3: 0
+cc: 0, l1: 0(0) l3: 0(0) diff1: 0 diff3: 0
+cc: 0, l1: 0(0) l3: 0(0) diff1: 0 diff3: 0
+cc: 0, l1: 0(0) l3: 0(0) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 0(0) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 0(0) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 0(0) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 0(0) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 0(0) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 0(0) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 0(0) diff1: 0 diff3: 0
+cc: 1, l1: 0(0) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 0(0) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 0(0) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 0(0) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 0(0) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 0(0) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 0(0) l3: 1(1) diff1: 0 diff3: 0
+cc: 0, l1: 0(1) l3: 0(1) diff1: 1 diff3: 1
+cc: 1, l1: 1(1) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 1(1) l3: 1(1) diff1: 0 diff3: 0
+cc: 0, l1: 0(1) l3: 0(1) diff1: 1 diff3: 1
+cc: 0, l1: 0(1) l3: 0(1) diff1: 1 diff3: 1
+cc: 2, l1: 1(1) l3: 1(1) diff1: 0 diff3: 0
+cc: 2, l1: 1(1) l3: 1(1) diff1: 0 diff3: 0
+cc: 1, l1: 0(5) l3: 18(23) diff1: 5 diff3: 5
+cc: 1, l1: 5(5) l3: 23(23) diff1: 0 diff3: 0
+cc: 1, l1: 5(5) l3: 23(23) diff1: 0 diff3: 0
+cc: 2, l1: 4(5) l3: 22(23) diff1: 1 diff3: 1
+cc: 1, l1: 4(5) l3: 22(23) diff1: 1 diff3: 1
+cc: 2, l1: 5(5) l3: 23(23) diff1: 0 diff3: 0
+cc: 2, l1: 5(5) l3: 23(23) diff1: 0 diff3: 0
+cc: 2, l1: 18(23) l3: 0(5) diff1: 5 diff3: 5
+cc: 1, l1: 23(23) l3: 5(5) diff1: 0 diff3: 0
+cc: 1, l1: 23(23) l3: 5(5) diff1: 0 diff3: 0
+cc: 2, l1: 22(23) l3: 4(5) diff1: 1 diff3: 1
+cc: 1, l1: 22(23) l3: 4(5) diff1: 1 diff3: 1
+cc: 2, l1: 23(23) l3: 5(5) diff1: 0 diff3: 0
+cc: 2, l1: 23(23) l3: 5(5) diff1: 0 diff3: 0
+cc: 0, l1: 0(10000) l3: 0(100000) diff1: 10000 diff3: 100000
+cc: 2, l1: 0(10000) l3: 90000(100000) diff1: 10000 diff3: 10000
+cc: 2, l1: 0(10000) l3: 90000(100000) diff1: 10000 diff3: 10000
--- valgrind/none/tests/s390x/clcle.vgtest
+++ valgrind/none/tests/s390x/clcle.vgtest
@@ -0,0 +1 @@
+prog: clcle
--- valgrind/none/tests/s390x/cvb.c
+++ valgrind/none/tests/s390x/cvb.c
@@ -0,0 +1,104 @@
+#include <stdio.h>
+
+static unsigned long test[] ={
+ 0x000000000000000a,
+ 0x000000000000001a,
+ 0x000000000000012a,
+ 0x000000000000123a,
+ 0x000000000001234a,
+ 0x000000000012345a,
+ 0x000000000123456a,
+ 0x000000001234567a,
+ 0x000000012345678a,
+ 0x000000123456789a,
+ 0x000001234567890a,
+ 0x000000000000000b,
+ 0x000000000000001b,
+ 0x000000000000012b,
+ 0x000000000000123b,
+ 0x000000000001234b,
+ 0x000000000012345b,
+ 0x000000000123456b,
+ 0x000000001234567b,
+ 0x000000012345678b,
+ 0x000000123456789b,
+ 0x000001234567890b,
+ 0x000000000000000c,
+ 0x000000000000001c,
+ 0x000000000000012c,
+ 0x000000000000123c,
+ 0x000000000001234c,
+ 0x000000000012345c,
+ 0x000000000123456c,
+ 0x000000001234567c,
+ 0x000000012345678c,
+ 0x000000123456789c,
+ 0x000001234567890c,
+ 0x000000000000000d,
+ 0x000000000000001d,
+ 0x000000000000012d,
+ 0x000000000000123d,
+ 0x000000000001234d,
+ 0x000000000012345d,
+ 0x000000000123456d,
+ 0x000000001234567d,
+ 0x000000012345678d,
+ 0x000000123456789d,
+ 0x000001234567890d,
+ 0x000000000000000e,
+ 0x000000000000001e,
+ 0x000000000000012e,
+ 0x000000000000123e,
+ 0x000000000001234e,
+ 0x000000000012345e,
+ 0x000000000123456e,
+ 0x000000001234567e,
+ 0x000000012345678e,
+ 0x000000123456789e,
+ 0x000001234567890e,
+ 0x000000000000000f,
+ 0x000000000000001f,
+ 0x000000000000012f,
+ 0x000000000000123f,
+ 0x000000000001234f,
+ 0x000000000012345f,
+ 0x000000000123456f,
+ 0x000000001234567f,
+ 0x000000012345678f,
+ 0x000000123456789f,
+ 0x000001234567890f,
+ /* min and max */
+ 0x000002147483647c,
+ 0x000002147483648d,
+
+/* fixs390: we also need to check if invalid values cause a fixed-point-divide exception.
+   Not yet implemented. */
+/* 0x000002147483648c,
+ 0x000002147483649d,
+ 0x00000000000000fa, */
+
+};
+
+
+static signed int dec_to_hex(unsigned long *addr)
+{
+ register signed int res asm("2") = 0;
+ register unsigned long *_addr asm("4") = addr;
+
+ asm volatile(
+ " cvb %0,0(0,%1)"
+ : "=d" (res) : "d" (_addr) : "memory");
+ return res & 0xffffffff;
+}
+
+
+
+
+int main()
+{
+ int i;
+
+ for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
+ printf("%d\n", dec_to_hex(&test[i]));
+ return 0;
+}
--- valgrind/none/tests/s390x/cvb.stderr.exp
+++ valgrind/none/tests/s390x/cvb.stderr.exp
@@ -0,0 +1,2 @@
+
+
--- valgrind/none/tests/s390x/cvb.stdout.exp
+++ valgrind/none/tests/s390x/cvb.stdout.exp
@@ -0,0 +1,68 @@
+0
+1
+12
+123
+1234
+12345
+123456
+1234567
+12345678
+123456789
+1234567890
+0
+-1
+-12
+-123
+-1234
+-12345
+-123456
+-1234567
+-12345678
+-123456789
+-1234567890
+0
+1
+12
+123
+1234
+12345
+123456
+1234567
+12345678
+123456789
+1234567890
+0
+-1
+-12
+-123
+-1234
+-12345
+-123456
+-1234567
+-12345678
+-123456789
+-1234567890
+0
+1
+12
+123
+1234
+12345
+123456
+1234567
+12345678
+123456789
+1234567890
+0
+1
+12
+123
+1234
+12345
+123456
+1234567
+12345678
+123456789
+1234567890
+2147483647
+-2147483648
--- valgrind/none/tests/s390x/cvb.vgtest
+++ valgrind/none/tests/s390x/cvb.vgtest
@@ -0,0 +1 @@
+prog: cvb
--- valgrind/none/tests/s390x/cvd.c
+++ valgrind/none/tests/s390x/cvd.c
@@ -0,0 +1,34 @@
+#include <stdio.h>
+
+static signed int test[] ={
+ 0,
+ 1,
+ -1,
+ 0x7fffffff,
+ 0x80000000,
+ 0x12345678,
+ 0x87654321,
+ 0x55555555,
+ 0x11111111,
+ 0xaaaaaaaa,
+};
+
+
+static unsigned long hex_to_dec(signed int num)
+{
+ unsigned long addr = 0;
+
+ asm volatile(
+ " cvd %2,%0"
+ : "=m" (addr) : "a" (&addr) , "d" (num) : "memory");
+ return addr;
+}
+
+int main()
+{
+ int i;
+
+ for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
+ printf("%lx\n", hex_to_dec(test[i]));
+ return 0;
+}
--- valgrind/none/tests/s390x/cvd.stderr.exp
+++ valgrind/none/tests/s390x/cvd.stderr.exp
@@ -0,0 +1,2 @@
+
+
--- valgrind/none/tests/s390x/cvd.stdout.exp
+++ valgrind/none/tests/s390x/cvd.stdout.exp
@@ -0,0 +1,10 @@
+c
+1c
+1d
+2147483647c
+2147483648d
+305419896c
+2023406815d
+1431655765c
+286331153c
+1431655766d
--- valgrind/none/tests/s390x/cvd.vgtest
+++ valgrind/none/tests/s390x/cvd.vgtest
@@ -0,0 +1 @@
+prog: cvd
--- valgrind/none/tests/s390x/ex_clone.c
+++ valgrind/none/tests/s390x/ex_clone.c
@@ -0,0 +1,60 @@
+#include <features.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+char source[40] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\0";
+char target[40] = " \0";
+
+pthread_t thread;
+
+void *threadfunc(void *arg)
+{
+ char buf2[40];
+ int i;
+
+ memset(buf2, 0, sizeof(buf2));
+ for (i=0; i<5000; i++)
+ asm volatile(
+ "lghi 2,0\n"
+ "lghi 3,0\n"
+ "lgr 4,%0\n"
+ "lgr 5,%1\n"
+ "larl 1,1f\n"
+ "0: ex 0,0(1)\n"
+ "j 2f\n"
+ "1: mvc 0(30,4),0(5)\n"
+ "2:\n"
+ ::"a" (buf2), "a" (source)
+ : "1", "2", "3", "4", "5", "memory");
+ printf("%s\n", buf2);
+ pthread_exit(0);
+}
+
+int main()
+{
+ int i;
+
+ pthread_create(&thread, NULL, threadfunc, NULL);
+
+ for (i=0; i<5000; i++)
+ asm volatile(
+ "lghi 4,0\n"
+ "lghi 5,0\n"
+ "lgr 2,%0\n"
+ "lgr 3,%1\n"
+ "larl 1,1f\n"
+ "0: ex 0,0(1)\n"
+ "j 2f\n"
+ "1: mvc 0(20,2),0(3)\n"
+ "2:\n"
+ ::"a" (target), "a" (source)
+ : "1", "2", "3", "4", "5", "memory");
+ pthread_join(thread, NULL);
+ printf("%s\n", target);
+ pthread_exit(0);
+}
--- valgrind/none/tests/s390x/ex_clone.stderr.exp
+++ valgrind/none/tests/s390x/ex_clone.stderr.exp
@@ -0,0 +1,2 @@
+
+
--- valgrind/none/tests/s390x/ex_clone.stdout.exp
+++ valgrind/none/tests/s390x/ex_clone.stdout.exp
@@ -0,0 +1,2 @@
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+aaaaaaaaaaaaaaaaaaaa
--- valgrind/none/tests/s390x/ex_clone.vgtest
+++ valgrind/none/tests/s390x/ex_clone.vgtest
@@ -0,0 +1 @@
+prog: ex_clone
--- valgrind/none/tests/s390x/ex_sig.c
+++ valgrind/none/tests/s390x/ex_sig.c
@@ -0,0 +1,46 @@
+#include <features.h>
+#include <fpu_control.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+char source[40] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\0";
+char target[40] = " \0";
+
+void handle_SIG(int sig)
+{
+ static int counter;
+ char buf2[40];
+
+ counter++;
+ asm volatile( "larl 1,1f\n"
+ "ex 0,0(1)\n"
+ "j 2f\n"
+ "1: mvc 0(30,%0),0(%1)\n"
+ "2:\n"
+ ::"a" (buf2), "a" (source)
+ : "1");
+ if (counter == 2) {
+ printf("%s\n", target);
+ exit(1);
+ } else
+ alarm(1);
+}
+
+int main()
+{
+ signal(SIGALRM, handle_SIG);
+ alarm(1);
+
+ asm volatile( "larl 1,1f\n"
+ "0: ex 0,0(1)\n"
+ "j 0b\n"
+ "1: mvc 0(20,%0),0(%1)\n"
+ ::"a" (target), "a" (source)
+ : "1");
+ exit(0);
+}
--- valgrind/none/tests/s390x/ex_sig.stderr.exp
+++ valgrind/none/tests/s390x/ex_sig.stderr.exp
@@ -0,0 +1,2 @@
+
+
--- valgrind/none/tests/s390x/ex_sig.stdout.exp
+++ valgrind/none/tests/s390x/ex_sig.stdout.exp
@@ -0,0 +1 @@
+aaaaaaaaaaaaaaaaaaaa
--- valgrind/none/tests/s390x/ex_sig.vgtest
+++ valgrind/none/tests/s390x/ex_sig.vgtest
@@ -0,0 +1 @@
+prog: ex_sig
--- valgrind/none/tests/s390x/filter_stderr
+++ valgrind/none/tests/s390x/filter_stderr
@@ -0,0 +1,4 @@
+#! /bin/sh
+
+../filter_stderr
+
--- /dev/null
+++ valgrind/none/tests/s390x/flogr.c
@@ -0,0 +1,68 @@
+#include <stdio.h>
+
+
+/* Call FLOGR on INPUT. The results are returned through the parms. */
+void
+flogr(unsigned long input, unsigned long *bitpos, unsigned long *modval,
+ unsigned int *cc)
+{
+ unsigned int psw;
+ register unsigned long value asm("4") = input;
+
+ asm volatile ( "flogr 2, %[val]\n\t"
+ "ipm %[psw]\n\t"
+ "stg 2, %[bitpos]\n\t"
+ "stg 3, %[modval]\n\t"
+ : [bitpos]"=m"(*bitpos), [modval]"=m"(*modval),
+ [psw]"=d"(psw)
+ : [val] "d"(value)
+ : "2", "3", "cc");
+
+ *cc = psw >> 28;
+#if 0
+ printf("value = %lx, bitpos = %lu, modval = %lx, cc = %d\n",
+ value, *bitpos, *modval, *cc);
+#endif
+}
+
+void
+runtest(void)
+{
+ unsigned long bitpos, modval, value;
+ unsigned int cc;
+ int i;
+
+ /* Value 0 is special */
+ value = 0;
+ flogr(value, &bitpos, &modval, &cc);
+ if (modval != 0) fprintf(stderr, "modval is wrong for %lx\n", value);
+ if (bitpos != 64) fprintf(stderr, "bitpos is wrong for %lx\n", value);
+ if (cc != 0) fprintf(stderr, "cc is wrong for %lx\n", value);
+
+ /* Test with exactly 1 bit set */
+ for (i = 0; i < 64; ++i) {
+ value = 1ull << i;
+ flogr(value, &bitpos, &modval, &cc);
+ if (modval != 0) fprintf(stderr, "modval is wrong for %lx\n", value);
+ if (bitpos != 63 - i) fprintf(stderr, "bitpos is wrong for %lx\n", value);
+ if (cc != 2) fprintf(stderr, "cc is wrong for %lx\n", value);
+ }
+
+ /* Test with all bits 1 right from first 1 bit */
+ for (i = 1; i < 64; ++i) {
+ value = 1ull << i;
+ value = value | (value - 1);
+ flogr(value, &bitpos, &modval, &cc);
+ if (modval != (value >> 1)) fprintf(stderr, "modval is wrong for %lx\n", value);
+ if (bitpos != 63 - i) fprintf(stderr, "bitpos is wrong for %lx\n", value);
+ if (cc != 2) fprintf(stderr, "cc is wrong for %lx\n", value);
+ }
+}
+
+
+int main()
+{
+ runtest();
+
+ return 0;
+}
--- valgrind/none/tests/s390x/flogr.stderr.exp
+++ valgrind/none/tests/s390x/flogr.stderr.exp
@@ -0,0 +1,2 @@
+
+
--- valgrind/none/tests/s390x/flogr.vgtest
+++ valgrind/none/tests/s390x/flogr.vgtest
@@ -0,0 +1 @@
+prog: flogr
--- valgrind/none/tests/s390x/lpr.c
+++ valgrind/none/tests/s390x/lpr.c
@@ -0,0 +1,95 @@
+#include <limits.h>
+#include <stdio.h>
+
+int lpr(int org, int *new)
+{
+   int _new, cc;
+   asm volatile(	"lpr %0,%2\n\t"
+			"ipm %1\n\t"
+			"srl %1,28\n\t"
+		: "=d" (_new), "=d" (cc)
+		: "d" (org)
+		: "cc");
+   *new = _new;
+   return cc;
+}
+
+int lpgr(unsigned long org, unsigned long *new)
+{
+   unsigned long _new;
+   int cc;
+   asm volatile(	"lpgr %0,%2\n\t"
+			"ipm %1\n\t"
+			"srl %1,28\n\t"
+		: "=d" (_new), "=d" (cc)
+		: "d" (org)
+		: "cc");
+   *new = _new;
+   return cc;
+}
+
+int lpgfr(unsigned long org, unsigned long *new)
+{
+   unsigned long _new;
+   int cc;
+   asm volatile(	"lpgfr %0,%2\n\t"
+			"ipm %1\n\t"
+			"srl %1,28\n\t"
+		: "=d" (_new), "=d" (cc)
+		: "d" (org)
+		: "cc");
+   *new = _new;
+   return cc;
+}
+
+
+void t32(int value)
+{
+   int n,cc;
+
+   cc = lpr(value, &n);
+
+   printf("new: %d cc: %d\n", n, cc);
+}
+
+void t64(unsigned long value)
+{
+   int cc;
+   unsigned long n;
+
+   cc = lpgr(value, &n);
+
+   printf("new: %ld cc: %d\n", n, cc);
+}
+
+void t3264(unsigned long value)
+{
+   int cc;
+   unsigned long n;
+
+   cc = lpgfr(value, &n);
+
+   printf("new: %ld cc: %d\n", n, cc);
+}
+
+
+
+int main()
+{
+   printf("lpr\n");
+   t32(0); t32(1); t32(-1);
+   t32(INT_MAX); t32(INT_MIN); t32(UINT_MAX);
+
+   printf("lpgr\n");
+   t64(0); t64(1); t64(-1);
+   t64(INT_MAX); t64(INT_MIN); t64(UINT_MAX);
+   t64(LONG_MAX); t64(LONG_MIN); t64(ULONG_MAX);
+
+   printf("lpgfr\n");
+   t3264(0); t3264(1); t3264(-1); /* fix: was t64(-1) (copy/paste slip); lpgfr(-1) prints the same "new: 1 cc: 2" */
+   t3264(INT_MAX); t3264(INT_MIN); t3264(UINT_MAX);
+   t3264(LONG_MAX); t3264(LONG_MIN); t3264(ULONG_MAX);
+
+   return 0;
+}
+
--- valgrind/none/tests/s390x/lpr.stderr.exp
+++ valgrind/none/tests/s390x/lpr.stderr.exp
@@ -0,0 +1,2 @@
+
+
--- valgrind/none/tests/s390x/lpr.stdout.exp
+++ valgrind/none/tests/s390x/lpr.stdout.exp
@@ -0,0 +1,27 @@
+lpr
+new: 0 cc: 0
+new: 1 cc: 2
+new: 1 cc: 2
+new: 2147483647 cc: 2
+new: -2147483648 cc: 3
+new: 1 cc: 2
+lpgr
+new: 0 cc: 0
+new: 1 cc: 2
+new: 1 cc: 2
+new: 2147483647 cc: 2
+new: 2147483648 cc: 2
+new: 4294967295 cc: 2
+new: 9223372036854775807 cc: 2
+new: -9223372036854775808 cc: 3
+new: 1 cc: 2
+lpgfr
+new: 0 cc: 0
+new: 1 cc: 2
+new: 1 cc: 2
+new: 2147483647 cc: 2
+new: 2147483648 cc: 2
+new: 1 cc: 2
+new: 1 cc: 2
+new: 0 cc: 0
+new: 1 cc: 2
--- valgrind/none/tests/s390x/lpr.vgtest
+++ valgrind/none/tests/s390x/lpr.vgtest
@@ -0,0 +1 @@
+prog: lpr
--- valgrind/none/tests/s390x/Makefile.in 2010-11-05 20:28:21.856830652 +0100
+++ valgrind/none/tests/s390x/Makefile.in 2010-11-05 20:28:51.358704320 +0100
@@ -0,0 +1,746 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# This file is used for tool tests, and also in perf/Makefile.am.
+
+# This file should be included (directly or indirectly) by every
+# Makefile.am that builds programs. And also the top-level Makefile.am.
+
+#----------------------------------------------------------------------------
+# Global stuff
+#----------------------------------------------------------------------------
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(dist_noinst_SCRIPTS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in $(top_srcdir)/Makefile.all.am \
+ $(top_srcdir)/Makefile.tool-tests.am
+check_PROGRAMS = $(am__EXEEXT_1) ex_sig$(EXEEXT) ex_clone$(EXEEXT)
+subdir = none/tests/s390x
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.in
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__EXEEXT_1 = clcle$(EXEEXT) cvb$(EXEEXT) cvd$(EXEEXT) lpr$(EXEEXT) \
+ flogr$(EXEEXT)
+clcle_SOURCES = clcle.c
+clcle_OBJECTS = clcle.$(OBJEXT)
+clcle_LDADD = $(LDADD)
+cvb_SOURCES = cvb.c
+cvb_OBJECTS = cvb.$(OBJEXT)
+cvb_LDADD = $(LDADD)
+cvd_SOURCES = cvd.c
+cvd_OBJECTS = cvd.$(OBJEXT)
+cvd_LDADD = $(LDADD)
+ex_clone_SOURCES = ex_clone.c
+ex_clone_OBJECTS = ex_clone.$(OBJEXT)
+ex_clone_LDADD = $(LDADD)
+ex_clone_LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(ex_clone_LDFLAGS) \
+ $(LDFLAGS) -o $@
+ex_sig_SOURCES = ex_sig.c
+ex_sig_OBJECTS = ex_sig.$(OBJEXT)
+ex_sig_LDADD = $(LDADD)
+flogr_SOURCES = flogr.c
+flogr_OBJECTS = flogr.$(OBJEXT)
+flogr_LDADD = $(LDADD)
+lpr_SOURCES = lpr.c
+lpr_OBJECTS = lpr.$(OBJEXT)
+lpr_LDADD = $(LDADD)
+SCRIPTS = $(dist_noinst_SCRIPTS)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+SOURCES = clcle.c cvb.c cvd.c ex_clone.c ex_sig.c flogr.c lpr.c
+DIST_SOURCES = clcle.c cvb.c cvd.c ex_clone.c ex_sig.c flogr.c lpr.c
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BOOST_CFLAGS = @BOOST_CFLAGS@
+BOOST_LIBS = @BOOST_LIBS@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFAULT_SUPP = @DEFAULT_SUPP@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DIFF = @DIFF@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FLAG_FNO_STACK_PROTECTOR = @FLAG_FNO_STACK_PROTECTOR@
+FLAG_M32 = @FLAG_M32@
+FLAG_M64 = @FLAG_M64@
+FLAG_MAIX32 = @FLAG_MAIX32@
+FLAG_MAIX64 = @FLAG_MAIX64@
+FLAG_MMMX = @FLAG_MMMX@
+FLAG_MSSE = @FLAG_MSSE@
+FLAG_NO_BUILD_ID = @FLAG_NO_BUILD_ID@
+FLAG_UNLIMITED_INLINE_UNIT_GROWTH = @FLAG_UNLIMITED_INLINE_UNIT_GROWTH@
+FLAG_W_EXTRA = @FLAG_W_EXTRA@
+FLAG_W_NO_EMPTY_BODY = @FLAG_W_NO_EMPTY_BODY@
+FLAG_W_NO_FORMAT_ZERO_LENGTH = @FLAG_W_NO_FORMAT_ZERO_LENGTH@
+FLAG_W_NO_UNINITIALIZED = @FLAG_W_NO_UNINITIALIZED@
+GDB = @GDB@
+GLIBC_VERSION = @GLIBC_VERSION@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MPI_CC = @MPI_CC@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PERL = @PERL@
+PKG_CONFIG = @PKG_CONFIG@
+PREFERRED_STACK_BOUNDARY = @PREFERRED_STACK_BOUNDARY@
+QTCORE_CFLAGS = @QTCORE_CFLAGS@
+QTCORE_LIBS = @QTCORE_LIBS@
+RANLIB = @RANLIB@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VALT_LOAD_ADDRESS_PRI = @VALT_LOAD_ADDRESS_PRI@
+VALT_LOAD_ADDRESS_SEC = @VALT_LOAD_ADDRESS_SEC@
+VERSION = @VERSION@
+VGCONF_ARCH_PRI = @VGCONF_ARCH_PRI@
+VGCONF_ARCH_SEC = @VGCONF_ARCH_SEC@
+VGCONF_OS = @VGCONF_OS@
+VGCONF_PLATFORM_PRI_CAPS = @VGCONF_PLATFORM_PRI_CAPS@
+VGCONF_PLATFORM_SEC_CAPS = @VGCONF_PLATFORM_SEC_CAPS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+inplacedir = $(top_builddir)/.in_place
+
+#----------------------------------------------------------------------------
+# Flags
+#----------------------------------------------------------------------------
+
+# Baseline flags for all compilations. Aim here is to maximise
+# performance and get whatever useful warnings we can out of gcc.
+AM_CFLAGS_BASE = \
+ -O2 -g \
+ -Wall \
+ -Wmissing-prototypes \
+ -Wshadow \
+ -Wpointer-arith \
+ -Wstrict-prototypes \
+ -Wmissing-declarations \
+ @FLAG_W_NO_FORMAT_ZERO_LENGTH@ \
+ -fno-strict-aliasing
+
+@VGCONF_OS_IS_DARWIN_FALSE@AM_CFLAGS_PIC = -fpic -O -g -fno-omit-frame-pointer -fno-strict-aliasing
+
+# These flags are used for building the preload shared objects.
+# The aim is to give reasonable performance but also to have good
+# stack traces, since users often see stack traces extending
+# into (and through) the preloads.
+@VGCONF_OS_IS_DARWIN_TRUE@AM_CFLAGS_PIC = -dynamic -O -g -fno-omit-frame-pointer -fno-strict-aliasing \
+@VGCONF_OS_IS_DARWIN_TRUE@ -mno-dynamic-no-pic -fpic -fPIC
+
+
+# Flags for specific targets.
+#
+# Nb: the AM_CPPFLAGS_* values are suitable for building tools and auxprogs.
+# For building the core, coregrind/Makefile.am files add some extra things.
+#
+# Also: in newer versions of automake (1.10 onwards?) asm files ending with
+# '.S' are considered "pre-processed" (as opposed to those ending in '.s')
+# and so the CPPFLAGS are passed to the assembler. But this is not true for
+# older automakes (e.g. 1.8.5, 1.9.6), sigh. So we include
+# AM_CPPFLAGS_<PLATFORM> in each AM_CCASFLAGS_<PLATFORM> variable. This
+# means some of the flags are duplicated on systems with newer versions of
+# automake, but this does not really matter and seems hard to avoid.
+AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@ = \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/include \
+ -I$(top_srcdir)/VEX/pub \
+ -DVGA_@VGCONF_ARCH_PRI@=1 \
+ -DVGO_@VGCONF_OS@=1 \
+ -DVGP_@VGCONF_ARCH_PRI@_@VGCONF_OS@=1
+
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@AM_CPPFLAGS_@VGCONF_PLATFORM_SEC_CAPS@ = \
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_srcdir) \
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_srcdir)/include \
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_srcdir)/VEX/pub \
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -DVGA_@VGCONF_ARCH_SEC@=1 \
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -DVGO_@VGCONF_OS@=1 \
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -DVGP_@VGCONF_ARCH_SEC@_@VGCONF_OS@=1
+
+AM_FLAG_M3264_X86_LINUX = @FLAG_M32@
+AM_CFLAGS_X86_LINUX = @FLAG_M32@ @PREFERRED_STACK_BOUNDARY@ \
+ $(AM_CFLAGS_BASE)
+
+AM_CCASFLAGS_X86_LINUX = $(AM_CPPFLAGS_X86_LINUX) @FLAG_M32@ -g
+AM_FLAG_M3264_AMD64_LINUX = @FLAG_M64@
+AM_CFLAGS_AMD64_LINUX = @FLAG_M64@ -fomit-frame-pointer \
+ @PREFERRED_STACK_BOUNDARY@ $(AM_CFLAGS_BASE)
+
+AM_CCASFLAGS_AMD64_LINUX = $(AM_CPPFLAGS_AMD64_LINUX) @FLAG_M64@ -g
+AM_FLAG_M3264_PPC32_LINUX = @FLAG_M32@
+AM_CFLAGS_PPC32_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE)
+AM_CCASFLAGS_PPC32_LINUX = $(AM_CPPFLAGS_PPC32_LINUX) @FLAG_M32@ -g
+AM_FLAG_M3264_PPC64_LINUX = @FLAG_M64@
+AM_CFLAGS_PPC64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE)
+AM_CCASFLAGS_PPC64_LINUX = $(AM_CPPFLAGS_PPC64_LINUX) @FLAG_M64@ -g
+AM_FLAG_M3264_ARM_LINUX = @FLAG_M32@
+AM_CFLAGS_ARM_LINUX = @FLAG_M32@ @PREFERRED_STACK_BOUNDARY@ \
+ $(AM_CFLAGS_BASE) -marm
+
+AM_CCASFLAGS_ARM_LINUX = $(AM_CPPFLAGS_ARM_LINUX) @FLAG_M32@ -marm -g
+AM_FLAG_M3264_PPC32_AIX5 = @FLAG_MAIX32@
+AM_CFLAGS_PPC32_AIX5 = @FLAG_MAIX32@ -mcpu=powerpc $(AM_CFLAGS_BASE)
+AM_CCASFLAGS_PPC32_AIX5 = $(AM_CPPFLAGS_PPC32_AIX5) \
+ @FLAG_MAIX32@ -mcpu=powerpc -g
+
+AM_FLAG_M3264_PPC64_AIX5 = @FLAG_MAIX64@
+AM_CFLAGS_PPC64_AIX5 = @FLAG_MAIX64@ -mcpu=powerpc64 $(AM_CFLAGS_BASE)
+AM_CCASFLAGS_PPC64_AIX5 = $(AM_CPPFLAGS_PPC64_AIX5) \
+ @FLAG_MAIX64@ -mcpu=powerpc64 -g
+
+AM_FLAG_M3264_X86_DARWIN = -arch i386
+AM_CFLAGS_X86_DARWIN = $(WERROR) -arch i386 $(AM_CFLAGS_BASE) \
+ -mmacosx-version-min=10.5 \
+ -fno-stack-protector -fno-pic -fno-PIC
+
+AM_CCASFLAGS_X86_DARWIN = $(AM_CPPFLAGS_X86_DARWIN) -arch i386 -g
+AM_FLAG_M3264_AMD64_DARWIN = -arch x86_64
+AM_CFLAGS_AMD64_DARWIN = $(WERROR) -arch x86_64 $(AM_CFLAGS_BASE) \
+ -mmacosx-version-min=10.5 -fno-stack-protector
+
+AM_CCASFLAGS_AMD64_DARWIN = $(AM_CPPFLAGS_AMD64_DARWIN) -arch x86_64 -g
+AM_FLAG_M3264_S390X_LINUX = @FLAG_M64@
+AM_CFLAGS_S390X_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE)
+AM_CCASFLAGS_S390X_LINUX = $(AM_CPPFLAGS_S390X_LINUX) -mzarch -march=z900 \
+ @FLAG_M64@ -g
+
+
+# Flags for the primary target. These must be used to build the
+# regtests and performance tests. In fact, these must be used to
+# build anything which is built only once on a dual-arch build.
+#
+AM_FLAG_M3264_PRI = $(AM_FLAG_M3264_@VGCONF_PLATFORM_PRI_CAPS@)
+AM_CPPFLAGS_PRI = $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+AM_CFLAGS_PRI = $(AM_CFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+AM_CCASFLAGS_PRI = $(AM_CCASFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+@VGCONF_HAVE_PLATFORM_SEC_FALSE@AM_FLAG_M3264_SEC =
+@VGCONF_HAVE_PLATFORM_SEC_TRUE@AM_FLAG_M3264_SEC = $(AM_FLAG_M3264_@VGCONF_PLATFORM_SEC_CAPS@)
+
+# Baseline link flags for making vgpreload shared objects.
+#
+PRELOAD_LDFLAGS_COMMON_LINUX = -nodefaultlibs -shared -Wl,-z,interpose,-z,initfirst
+PRELOAD_LDFLAGS_COMMON_AIX5 = -nodefaultlibs -shared -Wl,-G -Wl,-bnogc
+PRELOAD_LDFLAGS_COMMON_DARWIN = -dynamic -dynamiclib -all_load
+PRELOAD_LDFLAGS_X86_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
+PRELOAD_LDFLAGS_AMD64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
+PRELOAD_LDFLAGS_PPC32_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
+PRELOAD_LDFLAGS_PPC64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
+PRELOAD_LDFLAGS_ARM_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
+PRELOAD_LDFLAGS_PPC32_AIX5 = $(PRELOAD_LDFLAGS_COMMON_AIX5) @FLAG_MAIX32@
+PRELOAD_LDFLAGS_PPC64_AIX5 = $(PRELOAD_LDFLAGS_COMMON_AIX5) @FLAG_MAIX64@
+PRELOAD_LDFLAGS_X86_DARWIN = $(PRELOAD_LDFLAGS_COMMON_DARWIN) -arch i386
+PRELOAD_LDFLAGS_AMD64_DARWIN = $(PRELOAD_LDFLAGS_COMMON_DARWIN) -arch x86_64
+PRELOAD_LDFLAGS_S390X_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/include \
+ -I$(top_srcdir)/coregrind -I$(top_builddir)/include \
+ -I$(top_srcdir)/VEX/pub \
+ -DVGA_$(VGCONF_ARCH_PRI)=1 \
+ -DVGO_$(VGCONF_OS)=1 \
+ -DVGP_$(VGCONF_ARCH_PRI)_$(VGCONF_OS)=1
+
+# Nb: Tools need to augment these flags with an arch-selection option, such
+# as $(AM_FLAG_M3264_PRI).
+AM_CFLAGS = -Winline -Wall -Wshadow -g @FLAG_M64@
+AM_CXXFLAGS = -Winline -Wall -Wshadow -g @FLAG_M64@
+# Include AM_CPPFLAGS in AM_CCASFLAGS to allow for older versions of
+# automake; see comments in Makefile.all.am for more detail.
+AM_CCASFLAGS = $(AM_CPPFLAGS) @FLAG_M64@
+@VGCONF_OS_IS_DARWIN_TRUE@noinst_DSYMS = $(check_PROGRAMS)
+dist_noinst_SCRIPTS = filter_stderr
+INSN_TESTS = clcle cvb cvd lpr flogr
+EXTRA_DIST = \
+ $(addsuffix .stderr.exp,$(check_PROGRAMS)) \
+ $(addsuffix .stdout.exp,$(check_PROGRAMS)) \
+ $(addsuffix .vgtest,$(check_PROGRAMS))
+
+ex_clone_LDFLAGS = -lpthread
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/Makefile.tool-tests.am $(top_srcdir)/Makefile.all.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign none/tests/s390x/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign none/tests/s390x/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-checkPROGRAMS:
+ -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS)
+clcle$(EXEEXT): $(clcle_OBJECTS) $(clcle_DEPENDENCIES)
+ @rm -f clcle$(EXEEXT)
+ $(LINK) $(clcle_OBJECTS) $(clcle_LDADD) $(LIBS)
+cvb$(EXEEXT): $(cvb_OBJECTS) $(cvb_DEPENDENCIES)
+ @rm -f cvb$(EXEEXT)
+ $(LINK) $(cvb_OBJECTS) $(cvb_LDADD) $(LIBS)
+cvd$(EXEEXT): $(cvd_OBJECTS) $(cvd_DEPENDENCIES)
+ @rm -f cvd$(EXEEXT)
+ $(LINK) $(cvd_OBJECTS) $(cvd_LDADD) $(LIBS)
+ex_clone$(EXEEXT): $(ex_clone_OBJECTS) $(ex_clone_DEPENDENCIES)
+ @rm -f ex_clone$(EXEEXT)
+ $(ex_clone_LINK) $(ex_clone_OBJECTS) $(ex_clone_LDADD) $(LIBS)
+ex_sig$(EXEEXT): $(ex_sig_OBJECTS) $(ex_sig_DEPENDENCIES)
+ @rm -f ex_sig$(EXEEXT)
+ $(LINK) $(ex_sig_OBJECTS) $(ex_sig_LDADD) $(LIBS)
+flogr$(EXEEXT): $(flogr_OBJECTS) $(flogr_DEPENDENCIES)
+ @rm -f flogr$(EXEEXT)
+ $(LINK) $(flogr_OBJECTS) $(flogr_LDADD) $(LIBS)
+lpr$(EXEEXT): $(lpr_OBJECTS) $(lpr_DEPENDENCIES)
+ @rm -f lpr$(EXEEXT)
+ $(LINK) $(lpr_OBJECTS) $(lpr_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/clcle.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cvb.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cvd.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ex_clone.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ex_sig.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/flogr.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lpr.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+ $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS)
+ $(MAKE) $(AM_MAKEFLAGS) check-local
+check: check-am
+all-am: Makefile $(SCRIPTS)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-checkPROGRAMS clean-generic clean-local mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: check-am install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am check-local clean \
+ clean-checkPROGRAMS clean-generic clean-local ctags distclean \
+ distclean-compile distclean-generic distclean-tags distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \
+ uninstall-am
+
+
+# This used to be required when Vex had a handwritten Makefile. It
+# shouldn't be needed any more, though.
+
+#----------------------------------------------------------------------------
+# noinst_PROGRAMS and noinst_DSYMS targets
+#----------------------------------------------------------------------------
+
+# On Darwin, for a program 'p', the DWARF debug info is stored in the
+# directory 'p.dSYM'. This must be generated after the executable is
+# created, with 'dsymutil p'. We could redefine LINK with a script that
+# executes 'dsymutil' after linking, but that's a pain. Instead we use this
+# hook so that every time "make check" is run, we subsequently invoke
+# 'dsymutil' on all the executables that lack a .dSYM directory, or that are
+# newer than their corresponding .dSYM directory.
+build-noinst_DSYMS: $(noinst_DSYMS)
+ for f in $(noinst_DSYMS); do \
+ if [ ! -e $$f.dSYM -o $$f -nt $$f.dSYM ] ; then \
+ echo "dsymutil $$f"; \
+ dsymutil $$f; \
+ fi; \
+ done
+
+# This is used by coregrind/Makefile.am and Makefile.tool.am for doing
+# "in-place" installs. It copies $(noinst_PROGRAMS) into $inplacedir.
+# It needs to be depended on by an 'all-local' rule.
+inplace-noinst_PROGRAMS: $(noinst_PROGRAMS)
+ mkdir -p $(inplacedir); \
+ for f in $(noinst_PROGRAMS) ; do \
+ rm -f $(inplacedir)/$$f; \
+ ln -f -s ../$(subdir)/$$f $(inplacedir); \
+ done
+
+# Similar to inplace-noinst_PROGRAMS
+inplace-noinst_DSYMS: build-noinst_DSYMS
+ mkdir -p $(inplacedir); \
+ for f in $(noinst_DSYMS); do \
+ rm -f $(inplacedir)/$$f.dSYM; \
+ ln -f -s ../$(subdir)/$$f.dSYM $(inplacedir); \
+ done
+
+# This is used by coregrind/Makefile.am and by <tool>/Makefile.am for doing
+# "make install". It copies $(noinst_PROGRAMS) into $prefix/lib/valgrind/.
+# It needs to be depended on by an 'install-exec-local' rule.
+install-noinst_PROGRAMS: $(noinst_PROGRAMS)
+ $(mkinstalldirs) $(DESTDIR)$(pkglibdir); \
+ for f in $(noinst_PROGRAMS); do \
+ $(INSTALL_PROGRAM) $$f $(DESTDIR)$(pkglibdir); \
+ done
+
+# Similar to install-noinst_PROGRAMS.
+# Nb: we don't use $(INSTALL_PROGRAM) here because it doesn't work with
+# directories. XXX: not sure whether the resulting permissions will be
+# correct when using 'cp -R'...
+install-noinst_DSYMS: build-noinst_DSYMS
+ $(mkinstalldirs) $(DESTDIR)$(pkglibdir); \
+ for f in $(noinst_DSYMS); do \
+ cp -R $$f.dSYM $(DESTDIR)$(pkglibdir); \
+ done
+
+# This needs to be depended on by a 'clean-local' rule.
+clean-noinst_DSYMS:
+ for f in $(noinst_DSYMS); do \
+ rm -rf $$f.dSYM; \
+ done
+
+check-local: build-noinst_DSYMS
+
+clean-local: clean-noinst_DSYMS
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT: