Blame SOURCES/gcc8-rh1512529-aarch64.patch

ed1ed2
--- gcc/config/aarch64/aarch64.c
ed1ed2
+++ gcc/config/aarch64/aarch64.c
ed1ed2
@@ -3799,7 +3799,14 @@ aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
ed1ed2
   output_asm_insn ("sub\t%0, %0, %1", xops);
ed1ed2
 
ed1ed2
   /* Probe at TEST_ADDR.  */
ed1ed2
-  output_asm_insn ("str\txzr, [%0]", xops);
ed1ed2
+  if (flag_stack_clash_protection)
ed1ed2
+    {
ed1ed2
+      gcc_assert (xops[0] == stack_pointer_rtx);
ed1ed2
+      xops[1] = GEN_INT (PROBE_INTERVAL - 8);
ed1ed2
+      output_asm_insn ("str\txzr, [%0, %1]", xops);
ed1ed2
+    }
ed1ed2
+  else
ed1ed2
+    output_asm_insn ("str\txzr, [%0]", xops);
ed1ed2
 
ed1ed2
   /* Test if TEST_ADDR == LAST_ADDR.  */
ed1ed2
   xops[1] = reg2;
ed1ed2
@@ -4589,6 +4596,133 @@ aarch64_set_handled_components (sbitmap components)
ed1ed2
       cfun->machine->reg_is_wrapped_separately[regno] = true;
ed1ed2
 }
ed1ed2
 
ed1ed2
+/* Allocate POLY_SIZE bytes of stack space using TEMP1 and TEMP2 as scratch
ed1ed2
+   registers.  */
ed1ed2
+
ed1ed2
+static void
ed1ed2
+aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
ed1ed2
+					poly_int64 poly_size)
ed1ed2
+{
ed1ed2
+  HOST_WIDE_INT size;
ed1ed2
+  if (!poly_size.is_constant (&size))
ed1ed2
+    {
ed1ed2
+      sorry ("stack probes for SVE frames");
ed1ed2
+      return;
ed1ed2
+    }
ed1ed2
+
ed1ed2
+  HOST_WIDE_INT probe_interval
ed1ed2
+    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
ed1ed2
+  HOST_WIDE_INT guard_size
ed1ed2
+    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
ed1ed2
+  HOST_WIDE_INT guard_used_by_caller = 1024;
ed1ed2
+
ed1ed2
+  /* SIZE should be large enough to require probing here.  ie, it
ed1ed2
+     must be larger than GUARD_SIZE - GUARD_USED_BY_CALLER.
ed1ed2
+
ed1ed2
+     We can allocate GUARD_SIZE - GUARD_USED_BY_CALLER as a single chunk
ed1ed2
+     without any probing.  */
ed1ed2
+  gcc_assert (size >= guard_size - guard_used_by_caller);
ed1ed2
+  aarch64_sub_sp (temp1, temp2, guard_size - guard_used_by_caller, true);
ed1ed2
+  HOST_WIDE_INT orig_size = size;
ed1ed2
+  size -= (guard_size - guard_used_by_caller);
ed1ed2
+
ed1ed2
+  HOST_WIDE_INT rounded_size = size & -probe_interval;
ed1ed2
+  HOST_WIDE_INT residual = size - rounded_size;
ed1ed2
+
ed1ed2
+  /* We can handle a small number of allocations/probes inline.  Otherwise
ed1ed2
+     punt to a loop.  */
ed1ed2
+  if (rounded_size && rounded_size <= 4 * probe_interval)
ed1ed2
+    {
ed1ed2
+      /* We don't use aarch64_sub_sp here because we don't want to
ed1ed2
+	 repeatedly load TEMP1.  */
ed1ed2
+      rtx step = GEN_INT (-probe_interval);
ed1ed2
+      if (probe_interval > ARITH_FACTOR)
ed1ed2
+	{
ed1ed2
+	  emit_move_insn (temp1, step);
ed1ed2
+	  step = temp1;
ed1ed2
+	}
ed1ed2
+
ed1ed2
+      for (HOST_WIDE_INT i = 0; i < rounded_size; i += probe_interval)
ed1ed2
+	{
ed1ed2
+	  rtx_insn *insn = emit_insn (gen_add2_insn (stack_pointer_rtx, step));
ed1ed2
+          add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
ed1ed2
+
ed1ed2
+	  if (probe_interval > ARITH_FACTOR)
ed1ed2
+	    {
ed1ed2
+	      RTX_FRAME_RELATED_P (insn) = 1;
ed1ed2
+	      rtx adj = plus_constant (Pmode, stack_pointer_rtx, -probe_interval);
ed1ed2
+	      add_reg_note (insn, REG_CFA_ADJUST_CFA,
ed1ed2
+			    gen_rtx_SET (stack_pointer_rtx, adj));
ed1ed2
+	    }
ed1ed2
+
ed1ed2
+	  emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
ed1ed2
+					   (probe_interval
ed1ed2
+					    - GET_MODE_SIZE (word_mode))));
ed1ed2
+	  emit_insn (gen_blockage ());
ed1ed2
+	}
ed1ed2
+      dump_stack_clash_frame_info (PROBE_INLINE, size != rounded_size);
ed1ed2
+    }
ed1ed2
+  else if (rounded_size)
ed1ed2
+    {
ed1ed2
+      /* Compute the ending address.  */
ed1ed2
+      unsigned int scratchreg = REGNO (temp1);
ed1ed2
+      emit_move_insn (temp1, GEN_INT (-rounded_size));
ed1ed2
+      rtx_insn *insn
ed1ed2
+	 = emit_insn (gen_add3_insn (temp1, stack_pointer_rtx, temp1));
ed1ed2
+
ed1ed2
+      /* For the initial allocation, we don't have a frame pointer
ed1ed2
+	 set up, so we always need CFI notes.  If we're doing the
ed1ed2
+	 final allocation, then we may have a frame pointer, in which
ed1ed2
+	 case it is the CFA, otherwise we need CFI notes.
ed1ed2
+
ed1ed2
+	 We can determine which allocation we are doing by looking at
ed1ed2
+	 the temporary register.  IP0 is the initial allocation, IP1
ed1ed2
+	 is the final allocation.  */
ed1ed2
+      if (scratchreg == IP0_REGNUM || !frame_pointer_needed)
ed1ed2
+	{
ed1ed2
+	  /* We want the CFA independent of the stack pointer for the
ed1ed2
+	     duration of the loop.  */
ed1ed2
+	  add_reg_note (insn, REG_CFA_DEF_CFA,
ed1ed2
+			plus_constant (Pmode, temp1,
ed1ed2
+				       (rounded_size + (orig_size - size))));
ed1ed2
+	  RTX_FRAME_RELATED_P (insn) = 1;
ed1ed2
+	}
ed1ed2
+
ed1ed2
+      /* This allocates and probes the stack.
ed1ed2
+
ed1ed2
+	 It also probes at a 4k interval regardless of the value of
ed1ed2
+	 PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL.  */
ed1ed2
+      insn = emit_insn (gen_probe_stack_range (stack_pointer_rtx,
ed1ed2
+					       stack_pointer_rtx, temp1));
ed1ed2
+
ed1ed2
+      /* Now reset the CFA register if needed.  */
ed1ed2
+      if (scratchreg == IP0_REGNUM || !frame_pointer_needed)
ed1ed2
+	{
ed1ed2
+	  add_reg_note (insn, REG_CFA_DEF_CFA,
ed1ed2
+			plus_constant (Pmode, stack_pointer_rtx,
ed1ed2
+				       (rounded_size + (orig_size - size))));
ed1ed2
+	  RTX_FRAME_RELATED_P (insn) = 1;
ed1ed2
+	}
ed1ed2
+
ed1ed2
+      emit_insn (gen_blockage ());
ed1ed2
+      dump_stack_clash_frame_info (PROBE_LOOP, size != rounded_size);
ed1ed2
+    }
ed1ed2
+  else
ed1ed2
+    dump_stack_clash_frame_info (PROBE_INLINE, size != rounded_size);
ed1ed2
+
ed1ed2
+  /* Handle any residuals.
ed1ed2
+     Note that any residual must be probed.  */
ed1ed2
+  if (residual)
ed1ed2
+    {
ed1ed2
+      aarch64_sub_sp (temp1, temp2, residual, true);
ed1ed2
+      add_reg_note (get_last_insn (), REG_STACK_CHECK, const0_rtx);
ed1ed2
+      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
ed1ed2
+				       (residual - GET_MODE_SIZE (word_mode))));
ed1ed2
+      emit_insn (gen_blockage ());
ed1ed2
+    }
ed1ed2
+  return;
ed1ed2
+}
ed1ed2
+
ed1ed2
 /* Add a REG_CFA_EXPRESSION note to INSN to say that register REG
ed1ed2
    is saved at BASE + OFFSET.  */
ed1ed2
 
ed1ed2
@@ -4686,7 +4820,54 @@ aarch64_expand_prologue (void)
ed1ed2
   rtx ip0_rtx = gen_rtx_REG (Pmode, IP0_REGNUM);
ed1ed2
   rtx ip1_rtx = gen_rtx_REG (Pmode, IP1_REGNUM);
ed1ed2
 
ed1ed2
-  aarch64_sub_sp (ip0_rtx, ip1_rtx, initial_adjust, true);
ed1ed2
+  /* We do not fully protect aarch64 against stack clash style attacks
ed1ed2
+     as doing so would be prohibitively expensive with less utility over
ed1ed2
+     time as newer compilers are deployed.
ed1ed2
+
ed1ed2
+     We assume the guard is at least 64k.  Furthermore, we assume that
ed1ed2
+     the caller has not pushed the stack pointer more than 1k into
ed1ed2
+     the guard.  A caller that pushes the stack pointer more than 1k into
ed1ed2
+     the guard is considered invalid.
ed1ed2
+
ed1ed2
+     Note that the caller's ability to push the stack pointer into the
ed1ed2
+     guard is a function of the number and size of outgoing arguments and/or
ed1ed2
+     dynamic stack allocations due to the mandatory save of the link register
ed1ed2
+     in the caller's frame.
ed1ed2
+
ed1ed2
+     With those assumptions the callee can allocate up to 63k of stack
ed1ed2
+     space without probing.
ed1ed2
+
ed1ed2
+     When probing is needed, we emit a probe at the start of the prologue
ed1ed2
+     and every PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes thereafter.
ed1ed2
+
ed1ed2
+     We have to track how much space has been allocated, but we do not
ed1ed2
+     track stores into the stack as implicit probes except for the
ed1ed2
+     fp/lr store.  */
ed1ed2
+  HOST_WIDE_INT guard_size
ed1ed2
+    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
ed1ed2
+  HOST_WIDE_INT guard_used_by_caller = 1024;
ed1ed2
+  if (flag_stack_clash_protection)
ed1ed2
+    {
ed1ed2
+      if (known_eq (frame_size, 0))
ed1ed2
+	dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
ed1ed2
+      else if (known_lt (initial_adjust, guard_size - guard_used_by_caller)
ed1ed2
+	       && known_lt (final_adjust, guard_size - guard_used_by_caller))
ed1ed2
+	dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
ed1ed2
+    }
ed1ed2
+
ed1ed2
+  /* In theory we should never have both an initial adjustment
ed1ed2
+     and a callee save adjustment.  Verify that is the case since the
ed1ed2
+     code below does not handle it for -fstack-clash-protection.  */
ed1ed2
+  gcc_assert (known_eq (initial_adjust, 0) || callee_adjust == 0);
ed1ed2
+
ed1ed2
+  /* Only probe if the initial adjustment is larger than the guard
ed1ed2
+     less the amount of the guard reserved for use by the caller's
ed1ed2
+     outgoing args.  */
ed1ed2
+  if (flag_stack_clash_protection
ed1ed2
+      && maybe_ge (initial_adjust, guard_size - guard_used_by_caller))
ed1ed2
+    aarch64_allocate_and_probe_stack_space (ip0_rtx, ip1_rtx, initial_adjust);
ed1ed2
+  else
ed1ed2
+    aarch64_sub_sp (ip0_rtx, ip1_rtx, initial_adjust, true);
ed1ed2
 
ed1ed2
   if (callee_adjust != 0)
ed1ed2
     aarch64_push_regs (reg1, reg2, callee_adjust);
ed1ed2
@@ -4742,7 +4923,31 @@ aarch64_expand_prologue (void)
ed1ed2
 			     callee_adjust != 0 || emit_frame_chain);
ed1ed2
   aarch64_save_callee_saves (DFmode, callee_offset, V0_REGNUM, V31_REGNUM,
ed1ed2
 			     callee_adjust != 0 || emit_frame_chain);
ed1ed2
-  aarch64_sub_sp (ip1_rtx, ip0_rtx, final_adjust, !frame_pointer_needed);
ed1ed2
+
ed1ed2
+  /* We may need to probe the final adjustment as well.  */
ed1ed2
+  if (flag_stack_clash_protection && maybe_ne (final_adjust, 0))
ed1ed2
+    {
ed1ed2
+      /* First probe if the final adjustment is larger than the guard size
ed1ed2
+	 less the amount of the guard reserved for use by the caller's
ed1ed2
+	 outgoing args.  */
ed1ed2
+      if (maybe_ge (final_adjust, guard_size - guard_used_by_caller))
ed1ed2
+	aarch64_allocate_and_probe_stack_space (ip1_rtx, ip0_rtx,
ed1ed2
+						final_adjust);
ed1ed2
+      else
ed1ed2
+	aarch64_sub_sp (ip1_rtx, ip0_rtx, final_adjust, !frame_pointer_needed);
ed1ed2
+
ed1ed2
+      /* We must also probe if the final adjustment is larger than the guard
ed1ed2
+	 that is assumed used by the caller.  This may be sub-optimal.  */
ed1ed2
+      if (maybe_ge (final_adjust, guard_used_by_caller))
ed1ed2
+	{
ed1ed2
+	  if (dump_file)
ed1ed2
+	    fprintf (dump_file,
ed1ed2
+		     "Stack clash aarch64 large outgoing arg, probing\n");
ed1ed2
+	  emit_stack_probe (stack_pointer_rtx);
ed1ed2
+	}
ed1ed2
+    }
ed1ed2
+  else
ed1ed2
+    aarch64_sub_sp (ip1_rtx, ip0_rtx, final_adjust, !frame_pointer_needed);
ed1ed2
 }
ed1ed2
 
ed1ed2
 /* Return TRUE if we can use a simple_return insn.
ed1ed2
@@ -10476,6 +10681,12 @@ aarch64_override_options_internal (struct gcc_options *opts)
ed1ed2
       && opts->x_optimize >= aarch64_tune_params.prefetch->default_opt_level)
ed1ed2
     opts->x_flag_prefetch_loop_arrays = 1;
ed1ed2
 
ed1ed2
+  /* We assume the guard page is 64k.  */
ed1ed2
+  maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
ed1ed2
+			 16,
ed1ed2
+			 opts->x_param_values,
ed1ed2
+			 global_options_set.x_param_values);
ed1ed2
+
ed1ed2
   aarch64_override_options_after_change_1 (opts);
ed1ed2
 }
ed1ed2
 
ed1ed2
@@ -17161,6 +17372,28 @@ aarch64_sched_can_speculate_insn (rtx_insn *insn)
ed1ed2
     }
ed1ed2
 }
ed1ed2
 
ed1ed2
+/* It has been decided to allow up to 1kb of outgoing argument
ed1ed2
+   space to be allocated w/o probing.  If more than 1kb of outgoing
ed1ed2
+   argument space is allocated, then it must be probed and the last
ed1ed2
+   probe must occur no more than 1kbyte away from the end of the
ed1ed2
+   allocated space.
ed1ed2
+
ed1ed2
+   This implies that the residual part of an alloca allocation may
ed1ed2
+   need probing in cases where the generic code might not otherwise
ed1ed2
+   think a probe is needed.
ed1ed2
+
ed1ed2
+   This target hook returns TRUE when allocating RESIDUAL bytes of
ed1ed2
+   alloca space requires an additional probe, otherwise FALSE is
ed1ed2
+   returned.  */
ed1ed2
+
ed1ed2
+static bool
ed1ed2
+aarch64_stack_clash_protection_final_dynamic_probe (rtx residual)
ed1ed2
+{
ed1ed2
+  return (residual == CONST0_RTX (Pmode)
ed1ed2
+	  || GET_CODE (residual) != CONST_INT
ed1ed2
+	  || INTVAL (residual) >= 1024);
ed1ed2
+}
ed1ed2
+
ed1ed2
 /* Implement TARGET_COMPUTE_PRESSURE_CLASSES.  */
ed1ed2
 
ed1ed2
 static int
ed1ed2
@@ -17669,6 +17902,10 @@ aarch64_libgcc_floating_mode_supported_p
ed1ed2
 #undef TARGET_CONSTANT_ALIGNMENT
ed1ed2
 #define TARGET_CONSTANT_ALIGNMENT aarch64_constant_alignment
ed1ed2
 
ed1ed2
+#undef TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE
ed1ed2
+#define TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE \
ed1ed2
+  aarch64_stack_clash_protection_final_dynamic_probe
ed1ed2
+
ed1ed2
 #undef TARGET_COMPUTE_PRESSURE_CLASSES
ed1ed2
 #define TARGET_COMPUTE_PRESSURE_CLASSES aarch64_compute_pressure_classes
ed1ed2
 
ed1ed2
--- gcc/config/aarch64/aarch64.md
ed1ed2
+++ gcc/config/aarch64/aarch64.md
ed1ed2
@@ -5812,7 +5812,7 @@
ed1ed2
 )
ed1ed2
 
ed1ed2
 (define_insn "probe_stack_range"
ed1ed2
-  [(set (match_operand:DI 0 "register_operand" "=r")
ed1ed2
+  [(set (match_operand:DI 0 "register_operand" "=rk")
ed1ed2
 	(unspec_volatile:DI [(match_operand:DI 1 "register_operand" "0")
ed1ed2
 			     (match_operand:DI 2 "register_operand" "r")]
ed1ed2
 			      UNSPECV_PROBE_STACK_RANGE))]
ed1ed2
--- gcc/testsuite/gcc.target/aarch64/stack-check-12.c
ed1ed2
+++ gcc/testsuite/gcc.target/aarch64/stack-check-12.c
ed1ed2
@@ -0,0 +1,20 @@
ed1ed2
+/* { dg-do compile } */
ed1ed2
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
ed1ed2
+/* { dg-require-effective-target supports_stack_clash_protection } */
ed1ed2
+
ed1ed2
+extern void arf (unsigned long int *, unsigned long int *);
ed1ed2
+void
ed1ed2
+frob ()
ed1ed2
+{
ed1ed2
+  unsigned long int num[1000];
ed1ed2
+  unsigned long int den[1000];
ed1ed2
+  arf (den, num);
ed1ed2
+}
ed1ed2
+
ed1ed2
+/* This verifies that the scheduler did not break the dependencies
ed1ed2
+   by adjusting the offsets within the probe and that the scheduler
ed1ed2
+   did not reorder around the stack probes.  */
ed1ed2
+/* { dg-final { scan-assembler-times "sub\\tsp, sp, #4096\\n\\tstr\\txzr, .sp, 4088." 3 } } */
ed1ed2
+
ed1ed2
+
ed1ed2
+
ed1ed2
--- gcc/testsuite/gcc.target/aarch64/stack-check-13.c
ed1ed2
+++ gcc/testsuite/gcc.target/aarch64/stack-check-13.c
ed1ed2
@@ -0,0 +1,28 @@
ed1ed2
+/* { dg-do compile } */
ed1ed2
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
ed1ed2
+/* { dg-require-effective-target supports_stack_clash_protection } */
ed1ed2
+
ed1ed2
+#define ARG32(X) X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
ed1ed2
+#define ARG192(X) ARG32(X),ARG32(X),ARG32(X),ARG32(X),ARG32(X),ARG32(X)
ed1ed2
+void out1(ARG192(__int128));
ed1ed2
+int t1(int);
ed1ed2
+
ed1ed2
+int t3(int x)
ed1ed2
+{
ed1ed2
+  if (x < 1000)
ed1ed2
+    return t1 (x) + 1;
ed1ed2
+
ed1ed2
+  out1 (ARG192(1));
ed1ed2
+  return 0;
ed1ed2
+}
ed1ed2
+
ed1ed2
+
ed1ed2
+
ed1ed2
+/* This test creates a large (> 1k) outgoing argument area that needs
ed1ed2
+   to be probed.  We don't test the exact size of the space or the
ed1ed2
+   exact offset to make the test a little less sensitive to trivial
ed1ed2
+   output changes.  */
ed1ed2
+/* { dg-final { scan-assembler-times "sub\\tsp, sp, #....\\n\\tstr\\txzr, \\\[sp" 1 } } */
ed1ed2
+
ed1ed2
+
ed1ed2
+
ed1ed2
--- gcc/testsuite/gcc.target/aarch64/stack-check-14.c
ed1ed2
+++ gcc/testsuite/gcc.target/aarch64/stack-check-14.c
ed1ed2
@@ -0,0 +1,25 @@
ed1ed2
+/* { dg-do compile } */
ed1ed2
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
ed1ed2
+/* { dg-require-effective-target supports_stack_clash_protection } */
ed1ed2
+
ed1ed2
+int t1(int);
ed1ed2
+
ed1ed2
+int t2(int x)
ed1ed2
+{
ed1ed2
+  char *p = __builtin_alloca (4050);
ed1ed2
+  x = t1 (x);
ed1ed2
+  return p[x];
ed1ed2
+}
ed1ed2
+
ed1ed2
+
ed1ed2
+/* This test has a constant sized alloca that is smaller than the
ed1ed2
+   probe interval.  But it actually requires two probes instead
ed1ed2
+   of one because of the optimistic assumptions we made in the
ed1ed2
+   aarch64 prologue code WRT probing state. 
ed1ed2
+
ed1ed2
+   The form can change quite a bit so we just check for two
ed1ed2
+   probes without looking at the actual address.  */
ed1ed2
+/* { dg-final { scan-assembler-times "str\\txzr," 2 } } */
ed1ed2
+
ed1ed2
+
ed1ed2
+
ed1ed2
--- gcc/testsuite/gcc.target/aarch64/stack-check-15.c
ed1ed2
+++ gcc/testsuite/gcc.target/aarch64/stack-check-15.c
ed1ed2
@@ -0,0 +1,24 @@
ed1ed2
+/* { dg-do compile } */
ed1ed2
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
ed1ed2
+/* { dg-require-effective-target supports_stack_clash_protection } */
ed1ed2
+
ed1ed2
+int t1(int);
ed1ed2
+
ed1ed2
+int t2(int x)
ed1ed2
+{
ed1ed2
+  char *p = __builtin_alloca (x);
ed1ed2
+  x = t1 (x);
ed1ed2
+  return p[x];
ed1ed2
+}
ed1ed2
+
ed1ed2
+
ed1ed2
+/* This test has a variable sized alloca.  It requires 3 probes.
ed1ed2
+   One in the loop, one for the residual and at the end of the
ed1ed2
+   alloca area. 
ed1ed2
+
ed1ed2
+   The form can change quite a bit so we just check for two
ed1ed2
+   probes without looking at the actual address.  */
ed1ed2
+/* { dg-final { scan-assembler-times "str\\txzr," 3 } } */
ed1ed2
+
ed1ed2
+
ed1ed2
+
ed1ed2
--- gcc/testsuite/lib/target-supports.exp
ed1ed2
+++ gcc/testsuite/lib/target-supports.exp
ed1ed2
@@ -9201,14 +9201,9 @@ proc check_effective_target_autoincdec { } {
ed1ed2
 # 
ed1ed2
 proc check_effective_target_supports_stack_clash_protection { } {
ed1ed2
 
ed1ed2
-   # Temporary until the target bits are fully ACK'd.
ed1ed2
-#  if { [istarget aarch*-*-*] } {
ed1ed2
-#	return 1
ed1ed2
-#  }
ed1ed2
-
ed1ed2
     if { [istarget x86_64-*-*] || [istarget i?86-*-*] 
ed1ed2
 	  || [istarget powerpc*-*-*] || [istarget rs6000*-*-*]
ed1ed2
-	  || [istarget s390*-*-*] } {
ed1ed2
+	  || [istarget aarch64*-*-*] || [istarget s390*-*-*] } {
ed1ed2
 	return 1
ed1ed2
     }
ed1ed2
   return 0
ed1ed2
@@ -9217,9 +9212,9 @@ proc check_effective_target_supports_stack_clash_protection { } {
ed1ed2
 # Return 1 if the target creates a frame pointer for non-leaf functions
ed1ed2
 # Note we ignore cases where we apply tail call optimization here.
ed1ed2
 proc check_effective_target_frame_pointer_for_non_leaf { } {
ed1ed2
-  if { [istarget aarch*-*-*] } {
ed1ed2
-	return 1
ed1ed2
-  }
ed1ed2
+#  if { [istarget aarch*-*-*] } {
ed1ed2
+#	return 1
ed1ed2
+#  }
ed1ed2
 
ed1ed2
   # Solaris/x86 defaults to -fno-omit-frame-pointer.
ed1ed2
   if { [istarget i?86-*-solaris*] || [istarget x86_64-*-solaris*] } {