SOURCES/gcc48-rh1469697-14.patch

commit 21397732bbcef3347c0d5ff8a0ee5163e803e2fb
Author: Jeff Law <law@redhat.com>
Date:   Mon Oct 2 12:30:26 2017 -0600

    Dependencies for aarch64 work
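
Note on the new SP helpers below: aarch64_add_constant_internal and
aarch64_sub_sp adjust the stack pointer without transient deallocation,
i.e. SP is never first moved up and then back down.  As a rough sketch,
for a hypothetical frame of 0x12345 bytes (not a single shifted 12-bit
immediate, not a valid move immediate, but below 24 bits) the
two-addition path boils down to roughly:

	sub	sp, sp, #0x345		// low 12 bits of the adjustment first
	sub	sp, sp, #0x12000	// remaining shifted 12-bit part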
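
Note on the probe loop: the probe_stack_range pattern added to aarch64.md
expands via aarch64_output_probe_stack_range into a descending probe loop.
With the default 4 kB PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
and hypothetical registers x9/x10 standing in for operands 0 and 2, the
emitted assembly looks roughly like:

.LPSRL0:
	sub	x9, x9, #4096	// TEST_ADDR -= PROBE_INTERVAL
	str	xzr, [x9]	// probe the word at TEST_ADDR
	cmp	x9, x10		// until LAST_ADDR is reached
	b.ne	.LPSRL0
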
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 07ff7031b35..91dd5b7fc02 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -181,6 +181,7 @@ unsigned aarch64_dbx_register_number (unsigned);
 unsigned aarch64_trampoline_size (void);
 void aarch64_asm_output_labelref (FILE *, const char *);
 void aarch64_elf_asm_named_section (const char *, unsigned, tree);
+const char * aarch64_output_probe_stack_range (rtx, rtx);
 void aarch64_expand_epilogue (bool);
 void aarch64_expand_mov_immediate (rtx, rtx);
 void aarch64_expand_prologue (void);
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 5afc167d569..cadf193cfcf 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -969,6 +969,199 @@ aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
   return true;
 }
 
+static int
+aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+				enum machine_mode mode)
+{
+  int i;
+  unsigned HOST_WIDE_INT val, val2, mask;
+  int one_match, zero_match;
+  int num_insns;
+
+  val = INTVAL (imm);
+
+  if (aarch64_move_imm (val, mode))
+    {
+      if (generate)
+	emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+      return 1;
+    }
+
+  /* Check to see if the low 32 bits are either 0xffffXXXX or 0xXXXXffff
+     (with XXXX non-zero). In that case check to see if the move can be done in
+     a smaller mode.  */
+  val2 = val & 0xffffffff;
+  if (mode == DImode
+      && aarch64_move_imm (val2, SImode)
+      && (((val >> 32) & 0xffff) == 0 || (val >> 48) == 0))
+    {
+      if (generate)
+	emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val2)));
+
+      /* Check if we have to emit a second instruction by checking to see
+         if any of the upper 32 bits of the original DI mode value is set.  */
+      if (val == val2)
+	return 1;
+
+      i = (val >> 48) ? 48 : 32;
+
+      if (generate)
+	 emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+				    GEN_INT ((val >> i) & 0xffff)));
+
+      return 2;
+    }
+
+  if ((val >> 32) == 0 || mode == SImode)
+    {
+      if (generate)
+	{
+	  emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val & 0xffff)));
+	  if (mode == SImode)
+	    emit_insn (gen_insv_immsi (dest, GEN_INT (16),
+				       GEN_INT ((val >> 16) & 0xffff)));
+	  else
+	    emit_insn (gen_insv_immdi (dest, GEN_INT (16),
+				       GEN_INT ((val >> 16) & 0xffff)));
+	}
+      return 2;
+    }
+
+  /* Remaining cases are all for DImode.  */
+
+  mask = 0xffff;
+  zero_match = ((val & mask) == 0) + ((val & (mask << 16)) == 0) +
+    ((val & (mask << 32)) == 0) + ((val & (mask << 48)) == 0);
+  one_match = ((~val & mask) == 0) + ((~val & (mask << 16)) == 0) +
+    ((~val & (mask << 32)) == 0) + ((~val & (mask << 48)) == 0);
+
+  if (zero_match != 2 && one_match != 2)
+    {
+      /* Try emitting a bitmask immediate with a movk replacing 16 bits.
+	 For a 64-bit bitmask try whether changing 16 bits to all ones or
+	 zeroes creates a valid bitmask.  To check any repeated bitmask,
+	 try using 16 bits from the other 32-bit half of val.  */
+
+      for (i = 0; i < 64; i += 16, mask <<= 16)
+	{
+	  val2 = val & ~mask;
+	  if (val2 != val && aarch64_bitmask_imm (val2, mode))
+	    break;
+	  val2 = val | mask;
+	  if (val2 != val && aarch64_bitmask_imm (val2, mode))
+	    break;
+	  val2 = val2 & ~mask;
+	  val2 = val2 | (((val2 >> 32) | (val2 << 32)) & mask);
+	  if (val2 != val && aarch64_bitmask_imm (val2, mode))
+	    break;
+	}
+      if (i != 64)
+	{
+	  if (generate)
+	    {
+	      emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val2)));
+	      emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+					 GEN_INT ((val >> i) & 0xffff)));
+	    }
+	  return 2;
+	}
+    }
+
+  /* Generate 2-4 instructions, skipping 16 bits of all zeroes or ones which
+     are emitted by the initial mov.  If one_match > zero_match, skip set bits,
+     otherwise skip zero bits.  */
+
+  num_insns = 1;
+  mask = 0xffff;
+  val2 = one_match > zero_match ? ~val : val;
+  i = (val2 & mask) != 0 ? 0 : (val2 & (mask << 16)) != 0 ? 16 : 32;
+
+  if (generate)
+    emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (one_match > zero_match
+					   ? (val | ~(mask << i))
+					   : (val & (mask << i)))));
+  for (i += 16; i < 64; i += 16)
+    {
+      if ((val2 & (mask << i)) == 0)
+	continue;
+      if (generate)
+	emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+				   GEN_INT ((val >> i) & 0xffff)));
+      num_insns ++;
+    }
+
+  return num_insns;
+}
+
+/* Add DELTA to REGNUM in mode MODE.  SCRATCHREG can be used to hold a
+   temporary value if necessary.  FRAME_RELATED_P should be true if
+   the RTX_FRAME_RELATED flag should be set and CFA adjustments added
+   to the generated instructions.  If SCRATCHREG is known to hold
+   abs (delta), EMIT_MOVE_IMM can be set to false to avoid emitting the
+   immediate again.
+
+   Since this function may be used to adjust the stack pointer, we must
+   ensure that it cannot cause transient stack deallocation (for example
+   by first incrementing SP and then decrementing when adjusting by a
+   large immediate).  */
+
+static void
+aarch64_add_constant_internal (enum machine_mode mode, int regnum,
+			       int scratchreg, HOST_WIDE_INT delta,
+			       bool frame_related_p, bool emit_move_imm)
+{
+  HOST_WIDE_INT mdelta = abs_hwi (delta);
+  rtx this_rtx = gen_rtx_REG (mode, regnum);
+  rtx insn;
+
+  if (!mdelta)
+    return;
+
+  /* Single instruction adjustment.  */
+  if (aarch64_uimm12_shift (mdelta))
+    {
+      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta)));
+      RTX_FRAME_RELATED_P (insn) = frame_related_p;
+      return;
+    }
+
+  /* Emit 2 additions/subtractions if the adjustment is less than 24 bits.
+     Only do this if mdelta is not a 16-bit move as adjusting using a move
+     is better.  */
+  if (mdelta < 0x1000000 && !aarch64_move_imm (mdelta, mode))
+    {
+      HOST_WIDE_INT low_off = mdelta & 0xfff;
+
+      low_off = delta < 0 ? -low_off : low_off;
+      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (low_off)));
+      RTX_FRAME_RELATED_P (insn) = frame_related_p;
+      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta - low_off)));
+      RTX_FRAME_RELATED_P (insn) = frame_related_p;
+      return;
+    }
+
+  /* Emit a move immediate if required and an addition/subtraction.  */
+  rtx scratch_rtx = gen_rtx_REG (mode, scratchreg);
+  if (emit_move_imm)
+    aarch64_internal_mov_immediate (scratch_rtx, GEN_INT (mdelta), true, mode);
+  insn = emit_insn (delta < 0 ? gen_sub2_insn (this_rtx, scratch_rtx)
+			      : gen_add2_insn (this_rtx, scratch_rtx));
+  if (frame_related_p)
+    {
+      RTX_FRAME_RELATED_P (insn) = frame_related_p;
+      rtx adj = plus_constant (mode, this_rtx, delta);
+      add_reg_note (insn , REG_CFA_ADJUST_CFA,
+		    gen_rtx_SET (VOIDmode, this_rtx, adj));
+    }
+}
+
+static inline void
+aarch64_sub_sp (int scratchreg, HOST_WIDE_INT delta, bool frame_related_p)
+{
+  aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, -delta,
+				 frame_related_p, true);
+}
+
 /* Implement TARGET_PASS_BY_REFERENCE.  */
 
 static bool
@@ -1476,6 +1669,47 @@ aarch64_libgcc_cmp_return_mode (void)
   return SImode;
 }
 
+#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+
+/* We use the 12-bit shifted immediate arithmetic instructions so values
+   must be multiple of (1 << 12), i.e. 4096.  */
+#define ARITH_FACTOR 4096
+
+/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
+   absolute addresses.  */
+
+const char *
+aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
+{
+  static int labelno = 0;
+  char loop_lab[32];
+  rtx xops[2];
+
+  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
+
+  /* Loop.  */
+  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
+
+  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
+  xops[0] = reg1;
+  xops[1] = GEN_INT (PROBE_INTERVAL);
+  output_asm_insn ("sub\t%0, %0, %1", xops);
+
+  /* Probe at TEST_ADDR.  */
+  output_asm_insn ("str\txzr, [%0]", xops);
+
+  /* Test if TEST_ADDR == LAST_ADDR.  */
+  xops[1] = reg2;
+  output_asm_insn ("cmp\t%0, %1", xops);
+
+  /* Branch.  */
+  fputs ("\tb.ne\t", asm_out_file);
+  assemble_name_raw (asm_out_file, loop_lab);
+  fputc ('\n', asm_out_file);
+
+  return "";
+}
+
 static bool
 aarch64_frame_pointer_required (void)
 {
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 91299901bbf..17082486ac8 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -88,6 +88,7 @@
     UNSPEC_ST4
     UNSPEC_TLS
     UNSPEC_TLSDESC
+    UNSPECV_PROBE_STACK_RANGE   ; Represent stack range probing.
     UNSPEC_VSTRUCTDUMMY
 ])
 
@@ -3399,6 +3400,18 @@
   [(set_attr "length" "0")]
 )
 
+(define_insn "probe_stack_range"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(unspec_volatile:DI [(match_operand:DI 1 "register_operand" "0")
+			     (match_operand:DI 2 "register_operand" "r")]
+			      UNSPECV_PROBE_STACK_RANGE))]
+  ""
+{
+  return aarch64_output_probe_stack_range (operands[0], operands[2]);
+}
+  [(set_attr "length" "32")]
+)
+
 ;; Named pattern for expanding thread pointer reference.
 (define_expand "get_thread_pointerdi"
   [(match_operand:DI 0 "register_operand" "=r")]