SOURCES/ntp-4.2.6p5-cve-2015-7977_7978.patch

diff -up ntp-4.2.6p5/ntpd/ntp_request.c.cve-2015-7977_7978 ntp-4.2.6p5/ntpd/ntp_request.c
--- ntp-4.2.6p5/ntpd/ntp_request.c.cve-2015-7977_7978	2011-12-01 03:55:17.000000000 +0100
+++ ntp-4.2.6p5/ntpd/ntp_request.c	2016-01-20 11:14:20.855586406 +0100
@@ -1730,56 +1730,143 @@ setclr_flags(
 	     	loop_config(LOOP_DRIFTCOMP, drift_comp);
 }
 
+/* There have been some issues with the restrict list processing,
+ * ranging from problems with deep recursion (resulting in stack
+ * overflows) to overfull reply buffers.
+ *
+ * To avoid this trouble the list reversal is done iteratively using a
+ * scratch pad.
+ */
+typedef struct RestrictStack RestrictStackT;
+struct RestrictStack {
+	RestrictStackT   *link;
+	size_t            fcnt;
+	const restrict_u *pres[63];
+};
+
+static size_t
+getStackSheetSize(
+	RestrictStackT *sp
+	)
+{
+	if (sp)
+		return sizeof(sp->pres)/sizeof(sp->pres[0]);
+	return 0u;
+}
+
+static int/*BOOL*/
+pushRestriction(
+	RestrictStackT  **spp,
+	const restrict_u *ptr
+	)
+{
+	RestrictStackT *sp;
+
+	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
+		/* need another sheet in the scratch pad */
+		sp = emalloc(sizeof(*sp));
+		sp->link = *spp;
+		sp->fcnt = getStackSheetSize(sp);
+		*spp = sp;
+	}
+	sp->pres[--sp->fcnt] = ptr;
+	return TRUE;
+}
+
+static int/*BOOL*/
+popRestriction(
+	RestrictStackT   **spp,
+	const restrict_u **opp
+	)
+{
+	RestrictStackT *sp;
+
+	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
+		return FALSE;
+	
+	*opp = sp->pres[sp->fcnt++];
+	if (sp->fcnt >= getStackSheetSize(sp)) {
+		/* discard sheet from scratch pad */
+		*spp = sp->link;
+		free(sp);
+	}
+	return TRUE;
+}
+
+static void
+flushRestrictionStack(
+	RestrictStackT **spp
+	)
+{
+	RestrictStackT *sp;
+
+	while (NULL != (sp = *spp)) {
+		*spp = sp->link;
+		free(sp);
+	}
+}
+
 /*
- * list_restrict4 - recursive helper for list_restrict dumps IPv4
+ * list_restrict4 - iterative helper for list_restrict dumps IPv4
  *		    restriction list in reverse order.
  */
 static void
 list_restrict4(
-	restrict_u *		res,
+	const restrict_u *	res,
 	struct info_restrict **	ppir
 	)
 {
+	RestrictStackT *	rpad;
 	struct info_restrict *	pir;
 
-	if (res->link != NULL)
-		list_restrict4(res->link, ppir);
-
 	pir = *ppir;
-	pir->addr = htonl(res->u.v4.addr);
-	if (client_v6_capable) 
-		pir->v6_flag = 0;
-	pir->mask = htonl(res->u.v4.mask);
-	pir->count = htonl(res->count);
-	pir->flags = htons(res->flags);
-	pir->mflags = htons(res->mflags);
-	*ppir = (struct info_restrict *)more_pkt();
+	for (rpad = NULL; res; res = res->link)
+		if (!pushRestriction(&rpad, res))
+			break;
+	
+	while (pir && popRestriction(&rpad, &res)) {
+		pir->addr = htonl(res->u.v4.addr);
+		if (client_v6_capable) 
+			pir->v6_flag = 0;
+		pir->mask = htonl(res->u.v4.mask);
+		pir->count = htonl(res->count);
+		pir->flags = htons(res->flags);
+		pir->mflags = htons(res->mflags);
+		pir = (struct info_restrict *)more_pkt();
+	}
+	flushRestrictionStack(&rpad);
+	*ppir = pir;
 }
 
-
 /*
- * list_restrict6 - recursive helper for list_restrict dumps IPv6
+ * list_restrict6 - iterative helper for list_restrict dumps IPv6
  *		    restriction list in reverse order.
  */
 static void
 list_restrict6(
-	restrict_u *		res,
+	const restrict_u *	res,
 	struct info_restrict **	ppir
 	)
 {
+	RestrictStackT *	rpad;
 	struct info_restrict *	pir;
 
-	if (res->link != NULL)
-		list_restrict6(res->link, ppir);
-
 	pir = *ppir;
-	pir->addr6 = res->u.v6.addr; 
-	pir->mask6 = res->u.v6.mask;
-	pir->v6_flag = 1;
-	pir->count = htonl(res->count);
-	pir->flags = htons(res->flags);
-	pir->mflags = htons(res->mflags);
-	*ppir = (struct info_restrict *)more_pkt();
+	for (rpad = NULL; res; res = res->link)
+		if (!pushRestriction(&rpad, res))
+			break;
+
+	while (pir && popRestriction(&rpad, &res)) {
+		pir->addr6 = res->u.v6.addr; 
+		pir->mask6 = res->u.v6.mask;
+		pir->v6_flag = 1;
+		pir->count = htonl(res->count);
+		pir->flags = htons(res->flags);
+		pir->mflags = htons(res->mflags);
+		pir = (struct info_restrict *)more_pkt();
+	}
+	flushRestrictionStack(&rpad);
+	*ppir = pir;
 }
 
 
@@ -1803,8 +1890,7 @@ list_restrict(
 	/*
 	 * The restriction lists are kept sorted in the reverse order
 	 * than they were originally.  To preserve the output semantics,
-	 * dump each list in reverse order.  A recursive helper function
-	 * achieves that.
+	 * dump each list in reverse order. The workers take care of that.
 	 */
 	list_restrict4(restrictlist4, &ir);
 	if (client_v6_capable)
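
For reference, the technique the patch introduces can be seen in isolation below: a minimal, self-contained C sketch of iterative list reversal over a chained scratch pad of fixed-size sheets, mirroring pushRestriction()/popRestriction()/flushRestrictionStack() above. The Node type, the push_node/pop_node/flush_stack names, and the plain malloc() error handling are illustrative stand-ins, not ntpd code (ntpd uses restrict_u and emalloc(), which aborts on allocation failure).

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for ntpd's restrict_u: a simple singly linked list node. */
typedef struct Node {
	struct Node *link;
	int          value;
} Node;

/* One "sheet" of the scratch pad, as in the patch: 63 pointer slots,
 * filled from the top down; full sheets are chained together. */
typedef struct Sheet {
	struct Sheet *link;
	size_t        fcnt;	/* free slots remaining in pres[] */
	const Node   *pres[63];
} Sheet;

#define SHEET_SIZE (sizeof(((Sheet *)0)->pres) / sizeof(((Sheet *)0)->pres[0]))

static int push_node(Sheet **spp, const Node *ptr)
{
	Sheet *sp = *spp;

	if (NULL == sp || 0 == sp->fcnt) {
		/* need another sheet in the scratch pad */
		sp = malloc(sizeof(*sp));
		if (NULL == sp)
			return 0;	/* unlike emalloc(), report failure */
		sp->link = *spp;
		sp->fcnt = SHEET_SIZE;
		*spp = sp;
	}
	sp->pres[--sp->fcnt] = ptr;
	return 1;
}

static int pop_node(Sheet **spp, const Node **opp)
{
	Sheet *sp = *spp;

	if (NULL == sp || sp->fcnt >= SHEET_SIZE)
		return 0;	/* stack is empty */
	*opp = sp->pres[sp->fcnt++];
	if (sp->fcnt >= SHEET_SIZE) {
		/* sheet drained: discard it from the scratch pad */
		*spp = sp->link;
		free(sp);
	}
	return 1;
}

static void flush_stack(Sheet **spp)
{
	Sheet *sp;

	while (NULL != (sp = *spp)) {
		*spp = sp->link;
		free(sp);
	}
}

int main(void)
{
	Node nodes[5];
	Node *head = NULL;
	const Node *cur;
	Sheet *pad = NULL;
	int i;

	/* Build the list 4 -> 3 -> 2 -> 1 -> 0. */
	for (i = 0; i < 5; i++) {
		nodes[i].value = i;
		nodes[i].link = head;
		head = &nodes[i];
	}

	/* Push in list order, pop in reverse: prints 0 1 2 3 4. */
	for (cur = head; cur != NULL; cur = cur->link)
		if (!push_node(&pad, cur))
			break;
	while (pop_node(&pad, &cur))
		printf("%d ", cur->value);
	putchar('\n');

	flush_stack(&pad);	/* only has work if a push was cut short */
	return 0;
}

The point of the design is that the working memory lives on the heap, bounded by the list length, rather than on the C call stack, bounded by recursion depth; that is what closes the stack-overflow path the recursive helpers had. A pop that drains a sheet frees it immediately, so the final flush is a no-op unless the push loop was aborted partway through.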