SOURCES/gdb-6.3-ia64-gcore-speedup-20050714.patch

2005-07-14  Jeff Johnston  <jjohnstn@redhat.com>
	* linux-nat.c (linux_nat_xfer_memory): Incorporate Fujitsu
	work-around to use /proc/mem for storage, but to fall back
	to PTRACE for ia64 rse register areas.
	* ia64-linux-nat.c (ia64_rse_slot_num): New static function.
	(ia64_rse_skip_regs): Ditto.
	(ia64_linux_check_stack_region): New function.
	
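
[Editorial note, not part of the patch: the work-around described above amounts to doing bulk inferior reads through /proc/<pid>/mem for speed, and falling back to word-sized ptrace transfers only for ranges that /proc/<pid>/mem cannot report reliably (the ia64 RSE backing store).  The following is a minimal standalone sketch of that idea using plain Linux APIs.  It assumes the tracee is already ptrace-attached and stopped; the name read_inferior_memory and the needs_ptrace flag are illustrative, not GDB interfaces.]

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <unistd.h>

/* Read LEN bytes of a stopped, ptrace-attached tracee's memory at ADDR
   into BUF.  Fast path: a single pread on /proc/<pid>/mem.  Slow path
   (NEEDS_PTRACE non-zero): one PTRACE_PEEKDATA call per word, as the
   patch does for rse backing-store ranges.  */
static ssize_t
read_inferior_memory (pid_t pid, unsigned long addr, void *buf, size_t len,
                      int needs_ptrace)
{
  if (!needs_ptrace)
    {
      char path[64];
      snprintf (path, sizeof (path), "/proc/%d/mem", (int) pid);
      int fd = open (path, O_RDONLY);
      if (fd < 0)
        return -1;
      ssize_t n = pread (fd, buf, len, (off_t) addr);
      close (fd);
      return n;
    }

  /* Slow path: transfer one word at a time through ptrace.  */
  size_t done = 0;
  while (done < len)
    {
      errno = 0;
      long word = ptrace (PTRACE_PEEKDATA, pid, (void *) (addr + done), NULL);
      if (word == -1 && errno != 0)
        return done ? (ssize_t) done : -1;
      size_t chunk = len - done < sizeof word ? len - done : sizeof word;
      memcpy ((char *) buf + done, &word, chunk);
      done += chunk;
    }
  return (ssize_t) done;
}

[In the patch itself the fast path is linux_proc_xfer_partial and the slow path is the ptrace-based super_xfer_partial; the ia64-specific code below only decides which address ranges need the slow path.]
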
Index: gdb-6.8.50.20090803/gdb/linux-nat.c
===================================================================
--- gdb-6.8.50.20090803.orig/gdb/linux-nat.c	2009-08-04 06:29:55.000000000 +0200
+++ gdb-6.8.50.20090803/gdb/linux-nat.c	2009-08-04 06:30:53.000000000 +0200
@@ -4495,15 +4495,38 @@ linux_xfer_partial (struct target_ops *o
 	offset &= ((ULONGEST) 1 << addr_bit) - 1;
     }
 
-#ifndef NATIVE_XFER_UNWIND_TABLE
-  /* FIXME: For ia64, we cannot currently use linux_proc_xfer_memory
-	    for accessing thread storage.  Revert when Bugzilla 147436
-	    is fixed.  */
   xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
 				  offset, len);
   if (xfer != 0)
-    return xfer;
+    {
+#ifdef NATIVE_XFER_UNWIND_TABLE
+      struct mem_region range;
+      range.lo = memaddr;
+      range.hi = memaddr + len;
+
+      /* FIXME: For ia64, we cannot currently use
+	 linux_proc_xfer_partial for accessing rse register storage.
+	 Revert when Bugzilla 147436 is fixed.  */
+#ifdef NATIVE_XFER_UNWIND_TABLE
+      extern int ia64_linux_check_stack_region (struct lwp_info *lwp,
+						void *range);
+#endif
+      if (iterate_over_lwps (ia64_linux_check_stack_region, &range) != NULL)
+	{ /* This region contains ia64 rse registers; we have to re-read.  */
+	  int xxfer;
+
+	  /* Re-read register stack area.  */
+	  xxfer = super_xfer_partial (ops, object, annex,
+				      readbuf + (range.lo - memaddr),
+				      writebuf + (range.lo - memaddr),
+				      offset + (range.lo - memaddr),
+				      range.hi - range.lo);
+	  if (xxfer == 0)
+	    xfer = 0;
+	}
 #endif
+      return xfer;
+    }
 
   return super_xfer_partial (ops, object, annex, readbuf, writebuf,
 			     offset, len);
Index: gdb-6.8.50.20090803/gdb/ia64-linux-nat.c
===================================================================
--- gdb-6.8.50.20090803.orig/gdb/ia64-linux-nat.c	2009-02-23 01:03:49.000000000 +0100
+++ gdb-6.8.50.20090803/gdb/ia64-linux-nat.c	2009-08-04 06:30:53.000000000 +0200
@@ -809,6 +809,64 @@ ia64_linux_xfer_partial (struct target_o
 
 void _initialize_ia64_linux_nat (void);
 
+/*
+ * Note: taken from ia64-tdep.c
+ *
+ */
+
+static __inline__ unsigned long
+ia64_rse_slot_num (unsigned long addr)
+{
+  return (addr >> 3) & 0x3f;
+}
+
+/* Skip over a designated number of registers in the backing
+   store, remembering that every 64th slot is a NAT collection.  */
+static __inline__ unsigned long
+ia64_rse_skip_regs (unsigned long  addr, long num_regs)
+{
+  long delta = ia64_rse_slot_num(addr) + num_regs;
+
+  if (num_regs < 0)
+    delta -= 0x3e;
+  return addr + ((num_regs + delta/0x3f) << 3);
+}
+
+/*
+ * Check whether mem_region overlaps the register stack (RSE backing store).
+ * If it does, /proc/<pid>/mem cannot return the expected values.
+ */
+int ia64_linux_check_stack_region(struct lwp_info *ti, struct mem_region *range)
+{
+	CORE_ADDR addr;
+	int error;
+	unsigned long bsp, cfm, bspstore;
+	long sof;
+	pid_t pid = ptid_get_lwp(ti->ptid);
+	bsp = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSP ,NULL);
+	if (bsp == (unsigned long)-1) {
+		return 1;
+	}
+	/* The register stack is allocated as a single segment, not split into
+	   several segments, so we only have to check whether bsp is in *range*.  */
+	if((range->lo <= bsp) && (bsp <= range->hi)) {
+		bspstore = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSPSTORE, NULL);
+		cfm = ptrace(PTRACE_PEEKUSER, pid, PT_CFM, NULL);
+		sof = cfm & 0x3f;
+		bsp = ia64_rse_skip_regs(bsp, -sof);
+		range->lo = bspstore;
+		range->hi = bsp;
+		/* we have to check the size of dirty register stack area */
+		/*
+		fprintf_unfiltered(gdb_stdlog, "<%d> <%p>  <%lx> <%p> <%p>\n",
+				   pid, bsp, sof, range->lo, range->hi);
+		*/
+		return 1;
+	}
+	
+	return 0;
+}
+
 void
 _initialize_ia64_linux_nat (void)
 {
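
[Editorial worked example, not part of the patch: the backing-store arithmetic used by ia64_linux_check_stack_region above.  The two helpers are adapted from the hunk; the numbers in main() are purely illustrative and are chosen so that stepping back over the current frame crosses a NAT collection slot, which is exactly what the delta/0x3f correction accounts for.]

#include <stdio.h>

/* Each backing-store slot is 8 bytes; bits 3..8 of the address give the
   slot index (0..63) within the current group of 64 slots.  */
static unsigned long
ia64_rse_slot_num (unsigned long addr)
{
  return (addr >> 3) & 0x3f;
}

/* Move ADDR by NUM_REGS register slots, adding (or subtracting) one extra
   8-byte slot for every NAT collection word crossed (slot 63 of each
   64-slot group).  */
static unsigned long
ia64_rse_skip_regs (unsigned long addr, long num_regs)
{
  long delta = ia64_rse_slot_num (addr) + num_regs;

  if (num_regs < 0)
    delta -= 0x3e;
  return addr + ((num_regs + delta / 0x3f) << 3);
}

int
main (void)
{
  /* Illustrative values only.  Assume the backing store starts at
     0x600000000000 and 70 registers have been flushed to it; they occupy
     71 slots because slot 63 holds a NAT collection word.  */
  unsigned long bspstore = 0x600000000000UL;
  unsigned long bsp = bspstore + 71 * 8;
  long sof = 10;   /* size of the current frame, CFM & 0x3f */

  /* Step back over the current frame, as the patch does, to find the end
     of the part of the backing store that belongs to older frames.  */
  unsigned long hi = ia64_rse_skip_regs (bsp, -sof);

  printf ("bsp moved back %lu bytes for %ld registers\n", bsp - hi, sof);
  printf ("dirty rse region: [0x%lx, 0x%lx), %lu bytes\n",
          bspstore, hi, hi - bspstore);
  return 0;
}

[With these values the step back covers 88 bytes (11 slots) rather than 80, because the NAT collection word at slot 63 has to be skipped as well; the resulting dirty region [bspstore, hi) is what the patch hands back in *range for the ptrace re-read.]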