SOURCES/ntp-4.2.6p5-cve-2015-7977_7978.patch

diff -up ntp-4.2.6p5/ntpd/ntp_request.c.cve-2015-7977_7978 ntp-4.2.6p5/ntpd/ntp_request.c
--- ntp-4.2.6p5/ntpd/ntp_request.c.cve-2015-7977_7978	2011-12-01 03:55:17.000000000 +0100
+++ ntp-4.2.6p5/ntpd/ntp_request.c	2016-01-20 11:14:20.855586406 +0100
@@ -1730,56 +1730,143 @@ setclr_flags(
 	     	loop_config(LOOP_DRIFTCOMP, drift_comp);
 }
 
+/* There have been some issues with the restrict list processing,
+ * ranging from problems with deep recursion (resulting in stack
+ * overflows) and overfull reply buffers.
+ *
+ * To avoid this trouble the list reversal is done iteratively using a
+ * scratch pad.
+ */
+typedef struct RestrictStack RestrictStackT;
+struct RestrictStack {
+	RestrictStackT   *link;
+	size_t            fcnt;
+	const restrict_u *pres[63];
+};
+
+static size_t
+getStackSheetSize(
+	RestrictStackT *sp
+	)
+{
+	if (sp)
+		return sizeof(sp->pres)/sizeof(sp->pres[0]);
+	return 0u;
+}
+
+static int/*BOOL*/
+pushRestriction(
+	RestrictStackT  **spp,
+	const restrict_u *ptr
+	)
+{
+	RestrictStackT *sp;
+
+	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
+		/* need another sheet in the scratch pad */
+		sp = emalloc(sizeof(*sp));
+		sp->link = *spp;
+		sp->fcnt = getStackSheetSize(sp);
+		*spp = sp;
+	}
+	sp->pres[--sp->fcnt] = ptr;
+	return TRUE;
+}
+
+static int/*BOOL*/
+popRestriction(
+	RestrictStackT   **spp,
+	const restrict_u **opp
+	)
+{
+	RestrictStackT *sp;
+
+	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
+		return FALSE;
+	
+	*opp = sp->pres[sp->fcnt++];
+	if (sp->fcnt >= getStackSheetSize(sp)) {
+		/* discard sheet from scratch pad */
+		*spp = sp->link;
+		free(sp);
+	}
+	return TRUE;
+}
+
+static void
+flushRestrictionStack(
+	RestrictStackT **spp
+	)
+{
+	RestrictStackT *sp;
+
+	while (NULL != (sp = *spp)) {
+		*spp = sp->link;
+		free(sp);
+	}
+}
+
 /*
- * list_restrict4 - recursive helper for list_restrict dumps IPv4
+ * list_restrict4 - iterative helper for list_restrict dumps IPv4
  *		    restriction list in reverse order.
  */
 static void
 list_restrict4(
-	restrict_u *		res,
+	const restrict_u *	res,
 	struct info_restrict **	ppir
 	)
 {
+	RestrictStackT *	rpad;
 	struct info_restrict *	pir;
 
-	if (res->link != NULL)
-		list_restrict4(res->link, ppir);
-
 	pir = *ppir;
-	pir->addr = htonl(res->u.v4.addr);
-	if (client_v6_capable) 
-		pir->v6_flag = 0;
-	pir->mask = htonl(res->u.v4.mask);
-	pir->count = htonl(res->count);
-	pir->flags = htons(res->flags);
-	pir->mflags = htons(res->mflags);
-	*ppir = (struct info_restrict *)more_pkt();
+	for (rpad = NULL; res; res = res->link)
+		if (!pushRestriction(&rpad, res))
+			break;
+	
+	while (pir && popRestriction(&rpad, &res)) {
+		pir->addr = htonl(res->u.v4.addr);
+		if (client_v6_capable) 
+			pir->v6_flag = 0;
+		pir->mask = htonl(res->u.v4.mask);
+		pir->count = htonl(res->count);
+		pir->flags = htons(res->flags);
+		pir->mflags = htons(res->mflags);
+		pir = (struct info_restrict *)more_pkt();
+	}
+	flushRestrictionStack(&rpad);
+	*ppir = pir;
 }
 
-
 /*
- * list_restrict6 - recursive helper for list_restrict dumps IPv6
+ * list_restrict6 - iterative helper for list_restrict dumps IPv6
  *		    restriction list in reverse order.
  */
 static void
 list_restrict6(
-	restrict_u *		res,
+	const restrict_u *	res,
 	struct info_restrict **	ppir
 	)
 {
+	RestrictStackT *	rpad;
 	struct info_restrict *	pir;
 
-	if (res->link != NULL)
-		list_restrict6(res->link, ppir);
-
 	pir = *ppir;
-	pir->addr6 = res->u.v6.addr; 
-	pir->mask6 = res->u.v6.mask;
-	pir->v6_flag = 1;
-	pir->count = htonl(res->count);
-	pir->flags = htons(res->flags);
-	pir->mflags = htons(res->mflags);
-	*ppir = (struct info_restrict *)more_pkt();
+	for (rpad = NULL; res; res = res->link)
+		if (!pushRestriction(&rpad, res))
+			break;
+
+	while (pir && popRestriction(&rpad, &res)) {
+		pir->addr6 = res->u.v6.addr; 
+		pir->mask6 = res->u.v6.mask;
+		pir->v6_flag = 1;
+		pir->count = htonl(res->count);
+		pir->flags = htons(res->flags);
+		pir->mflags = htons(res->mflags);
+		pir = (struct info_restrict *)more_pkt();
+	}
+	flushRestrictionStack(&rpad);
+	*ppir = pir;
 }
 
 
@@ -1803,8 +1890,7 @@ list_restrict(
 	/*
 	 * The restriction lists are kept sorted in the reverse order
 	 * than they were originally.  To preserve the output semantics,
-	 * dump each list in reverse order.  A recursive helper function
-	 * achieves that.
+	 * dump each list in reverse order. The workers take care of that.
 	 */
 	list_restrict4(restrictlist4, &ir);
 	if (client_v6_capable)
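
Note: the following is an illustrative, self-contained sketch (not part of the patch or of ntpd) of the scratch-pad technique the patch introduces: a singly linked list is walked once while node pointers are pushed onto a chain of heap-allocated fixed-size "sheets", then popped back off, so the list is visited in reverse order with bounded call-stack usage. All identifiers here (node_t, sheet_t, push_node, pop_node, visit_reversed, SHEET_SLOTS) are hypothetical; the real code works on restrict_u, allocates with emalloc() and writes into the more_pkt() reply buffer instead of printing.

#include <stdio.h>
#include <stdlib.h>

#define SHEET_SLOTS 63	/* pointer slots per scratch-pad sheet (hypothetical size) */

/* Hypothetical list node; stands in for ntpd's restrict_u. */
typedef struct node node_t;
struct node {
	node_t	*link;
	int	 value;
};

/* One sheet of the scratch pad: a fixed block of pointer slots.
 * Sheets are chained, so memory grows one sheet at a time instead
 * of one call-stack frame per list entry as with recursion. */
typedef struct sheet sheet_t;
struct sheet {
	sheet_t		*link;
	size_t		 fcnt;		/* free slots left in pres[] */
	const node_t	*pres[SHEET_SLOTS];
};

static int
push_node(sheet_t **spp, const node_t *ptr)
{
	sheet_t *sp = *spp;

	if (NULL == sp || 0 == sp->fcnt) {
		/* current sheet full (or none yet): chain a fresh one */
		sp = malloc(sizeof(*sp));
		if (NULL == sp)
			return 0;	/* plain malloc() can fail; ntpd's emalloc() cannot */
		sp->link = *spp;
		sp->fcnt = SHEET_SLOTS;
		*spp = sp;
	}
	sp->pres[--sp->fcnt] = ptr;	/* sheets fill from the top slot down */
	return 1;
}

static int
pop_node(sheet_t **spp, const node_t **opp)
{
	sheet_t *sp = *spp;

	if (NULL == sp || sp->fcnt >= SHEET_SLOTS)
		return 0;		/* pad is empty */
	*opp = sp->pres[sp->fcnt++];
	if (sp->fcnt >= SHEET_SLOTS) {	/* sheet drained: unlink and free it */
		*spp = sp->link;
		free(sp);
	}
	return 1;
}

/* Visit a singly linked list in reverse order without recursion. */
static void
visit_reversed(const node_t *head)
{
	sheet_t		*pad = NULL;
	const node_t	*cur;

	for (cur = head; NULL != cur; cur = cur->link)
		if (!push_node(&pad, cur))
			break;		/* out of memory: visit what was stacked */
	while (pop_node(&pad, &cur))
		printf("%d\n", cur->value);
	/* the pop loop drains every sheet here, so nothing is left to flush */
}

int
main(void)
{
	node_t c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

	visit_reversed(&a);	/* prints 3, 2, 1: the reverse of the link order */
	return 0;
}

Because the pop loop in this sketch always drains the pad, no separate flush is needed; the patch keeps flushRestrictionStack() because its copy loop can stop early once more_pkt() reports the reply buffer is full, leaving sheets behind that must still be freed.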