Blame SOURCES/ntp-4.2.6p5-cve-2015-7977_7978.patch

diff -up ntp-4.2.6p5/ntpd/ntp_request.c.cve-2015-7977_7978 ntp-4.2.6p5/ntpd/ntp_request.c
--- ntp-4.2.6p5/ntpd/ntp_request.c.cve-2015-7977_7978	2011-12-01 03:55:17.000000000 +0100
+++ ntp-4.2.6p5/ntpd/ntp_request.c	2016-01-20 11:14:20.855586406 +0100
@@ -1730,56 +1730,143 @@ setclr_flags(
 	     	loop_config(LOOP_DRIFTCOMP, drift_comp);
 }
 
+/* There have been some issues with the restrict list processing,
+ * ranging from problems with deep recursion (resulting in stack
+ * overflows) to overfull reply buffers.
+ *
+ * To avoid this trouble the list reversal is done iteratively using a
+ * scratch pad.
+ */
+typedef struct RestrictStack RestrictStackT;
+struct RestrictStack {
+	RestrictStackT   *link;
+	size_t            fcnt;
+	const restrict_u *pres[63];
+};
+
+static size_t
+getStackSheetSize(
+	RestrictStackT *sp
+	)
+{
+	if (sp)
+		return sizeof(sp->pres)/sizeof(sp->pres[0]);
+	return 0u;
+}
+
+static int/*BOOL*/
+pushRestriction(
+	RestrictStackT  **spp,
+	const restrict_u *ptr
+	)
+{
+	RestrictStackT *sp;
+
+	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
+		/* need another sheet in the scratch pad */
+		sp = emalloc(sizeof(*sp));
+		sp->link = *spp;
+		sp->fcnt = getStackSheetSize(sp);
+		*spp = sp;
+	}
+	sp->pres[--sp->fcnt] = ptr;
+	return TRUE;
+}
+
+static int/*BOOL*/
+popRestriction(
+	RestrictStackT   **spp,
+	const restrict_u **opp
+	)
+{
+	RestrictStackT *sp;
+
+	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
+		return FALSE;
+	
+	*opp = sp->pres[sp->fcnt++];
+	if (sp->fcnt >= getStackSheetSize(sp)) {
+		/* discard sheet from scratch pad */
+		*spp = sp->link;
+		free(sp);
+	}
+	return TRUE;
+}
+
+static void
+flushRestrictionStack(
+	RestrictStackT **spp
+	)
+{
+	RestrictStackT *sp;
+
+	while (NULL != (sp = *spp)) {
+		*spp = sp->link;
+		free(sp);
+	}
+}
+
 /*
- * list_restrict4 - recursive helper for list_restrict dumps IPv4
+ * list_restrict4 - iterative helper for list_restrict dumps IPv4
  *		    restriction list in reverse order.
  */
 static void
 list_restrict4(
-	restrict_u *		res,
+	const restrict_u *	res,
 	struct info_restrict **	ppir
 	)
 {
+	RestrictStackT *	rpad;
 	struct info_restrict *	pir;
 
-	if (res->link != NULL)
-		list_restrict4(res->link, ppir);
-
 	pir = *ppir;
-	pir->addr = htonl(res->u.v4.addr);
-	if (client_v6_capable) 
-		pir->v6_flag = 0;
-	pir->mask = htonl(res->u.v4.mask);
-	pir->count = htonl(res->count);
-	pir->flags = htons(res->flags);
-	pir->mflags = htons(res->mflags);
-	*ppir = (struct info_restrict *)more_pkt();
+	for (rpad = NULL; res; res = res->link)
+		if (!pushRestriction(&rpad, res))
+			break;
+	
+	while (pir && popRestriction(&rpad, &res)) {
+		pir->addr = htonl(res->u.v4.addr);
+		if (client_v6_capable) 
+			pir->v6_flag = 0;
+		pir->mask = htonl(res->u.v4.mask);
+		pir->count = htonl(res->count);
+		pir->flags = htons(res->flags);
+		pir->mflags = htons(res->mflags);
+		pir = (struct info_restrict *)more_pkt();
+	}
+	flushRestrictionStack(&rpad);
+	*ppir = pir;
 }
 
-
 /*
- * list_restrict6 - recursive helper for list_restrict dumps IPv6
+ * list_restrict6 - iterative helper for list_restrict dumps IPv6
  *		    restriction list in reverse order.
  */
 static void
 list_restrict6(
-	restrict_u *		res,
+	const restrict_u *	res,
 	struct info_restrict **	ppir
 	)
 {
+	RestrictStackT *	rpad;
 	struct info_restrict *	pir;
 
-	if (res->link != NULL)
-		list_restrict6(res->link, ppir);
-
 	pir = *ppir;
-	pir->addr6 = res->u.v6.addr; 
-	pir->mask6 = res->u.v6.mask;
-	pir->v6_flag = 1;
-	pir->count = htonl(res->count);
-	pir->flags = htons(res->flags);
-	pir->mflags = htons(res->mflags);
-	*ppir = (struct info_restrict *)more_pkt();
+	for (rpad = NULL; res; res = res->link)
+		if (!pushRestriction(&rpad, res))
+			break;
+
+	while (pir && popRestriction(&rpad, &res)) {
+		pir->addr6 = res->u.v6.addr; 
+		pir->mask6 = res->u.v6.mask;
+		pir->v6_flag = 1;
+		pir->count = htonl(res->count);
+		pir->flags = htons(res->flags);
+		pir->mflags = htons(res->mflags);
+		pir = (struct info_restrict *)more_pkt();
+	}
+	flushRestrictionStack(&rpad);
+	*ppir = pir;
 }
 
 
@@ -1803,8 +1890,7 @@ list_restrict(
 	/*
 	 * The restriction lists are kept sorted in the reverse order
 	 * than they were originally.  To preserve the output semantics,
-	 * dump each list in reverse order.  A recursive helper function
-	 * achieves that.
+	 * dump each list in reverse order. The workers take care of that.
 	 */
 	list_restrict4(restrictlist4, &ir);
 	if (client_v6_capable)
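
Note: the patch replaces the recursive list walks with an explicit "scratch pad" stack allocated in sheets of 63 pointers each, so a long restrict list can no longer exhaust the C call stack, and the reply loop stops as soon as more_pkt() stops handing out buffer space. The standalone sketch below illustrates the same technique outside of ntpd; the list type, the helper names, and the plain malloc()/abort() error handling are illustrative stand-ins, not code from the NTP sources.

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *link;
	int          value;
};

/* One "sheet" of the scratch pad: a fixed block of pointer slots.
 * Sheets are chained together, so the pad grows on the heap rather
 * than on the C call stack. */
typedef struct sheet SheetT;
struct sheet {
	SheetT            *link;
	size_t             fcnt;	/* free slots left in this sheet */
	const struct node *ptrs[63];
};

static void
push_node(SheetT **spp, const struct node *ptr)
{
	SheetT *sp = *spp;

	if (NULL == sp || 0 == sp->fcnt) {
		/* current sheet full (or no sheet yet): chain a new one */
		sp = malloc(sizeof(*sp));
		if (NULL == sp)
			abort();	/* stand-in; ntpd's emalloc() also aborts */
		sp->link = *spp;
		sp->fcnt = sizeof(sp->ptrs) / sizeof(sp->ptrs[0]);
		*spp = sp;
	}
	sp->ptrs[--sp->fcnt] = ptr;	/* slots fill from the top down */
}

static int
pop_node(SheetT **spp, const struct node **opp)
{
	SheetT *sp = *spp;
	const size_t nslots = sizeof(sp->ptrs) / sizeof(sp->ptrs[0]);

	if (NULL == sp || sp->fcnt >= nslots)
		return 0;	/* scratch pad is empty */
	*opp = sp->ptrs[sp->fcnt++];
	if (sp->fcnt >= nslots) {
		/* sheet fully drained: unchain and free it */
		*spp = sp->link;
		free(sp);
	}
	return 1;
}

static void
flush_pad(SheetT **spp)
{
	SheetT *sp;

	while (NULL != (sp = *spp)) {
		*spp = sp->link;
		free(sp);
	}
}

/* Print a singly linked list in reverse order without recursion:
 * push every node onto the pad, then pop them back off (LIFO). */
static void
print_reversed(const struct node *head)
{
	SheetT            *pad = NULL;
	const struct node *cur;

	for (cur = head; cur != NULL; cur = cur->link)
		push_node(&pad, cur);
	while (pop_node(&pad, &cur))
		printf("%d\n", cur->value);
	flush_pad(&pad);	/* no-op here; matters when the consumer
				 * can stop early, as more_pkt() can above */
}

int
main(void)
{
	struct node n3 = { NULL, 3 }, n2 = { &n3, 2 }, n1 = { &n2, 1 };

	print_reversed(&n1);	/* prints 3, 2, 1 */
	return 0;
}

Within a sheet the slots fill from the top index down and drain back up, so the chained sheets behave as one LIFO stack; popping therefore yields the nodes in reverse insertion order, which is exactly what the removed recursion achieved without its unbounded stack depth.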