From 7af485f0fc9926425681ba0280ab6c2c8dd04530 Mon Sep 17 00:00:00 2001
From: "W.C.A. Wijngaards" <wouter@nlnetlabs.nl>
Date: Wed, 21 Sep 2022 11:10:38 +0200
Subject: [PATCH] - Patch for CVE-2022-3204 Non-Responsive Delegation Attack.

---
 unbound-1.16.2/iterator/iter_delegpt.c |  3 +++
 unbound-1.16.2/iterator/iter_delegpt.h |  2 ++
 unbound-1.16.2/iterator/iter_utils.c   |  3 +++
 unbound-1.16.2/iterator/iter_utils.h   |  9 +++++++
 unbound-1.16.2/iterator/iterator.c     | 36 +++++++++++++++++++++++++-
 unbound-1.16.2/services/cache/dns.c    |  3 +++
 unbound-1.16.2/services/mesh.c         |  7 +++++
 unbound-1.16.2/services/mesh.h         | 11 ++++++++
 8 files changed, 73 insertions(+), 1 deletion(-)
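
The fix works along two lines. Each delegation-point nameserver entry gains a
cache_lookup_count, and cache lookups for that name stop once the counter passes
ITERATOR_NAME_CACHELOOKUP_MAX (or the slightly higher
ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE on the parent-side glue path), so a crafted
delegation with many unresolvable NS names can no longer force quadratic cache
work. In addition, the new mesh_jostle_exceeded() helper lets the iterator stop
spawning optional target-fetch subqueries while the mesh query list is full. A
minimal sketch of the first guard, using a simplified stand-in for struct
delegpt_ns rather than the real type:

    /* Simplified stand-in for struct delegpt_ns; only the fields needed
     * to show the capped-lookup pattern are kept. */
    struct ns_entry {
        struct ns_entry* next;
        const char* name;
        int cache_lookup_count;   /* new counter, starts at 0 */
    };

    #define NAME_CACHELOOKUP_MAX 3    /* per-name cap, as in the patch */

    /* Walk the NS list and consult the cache for each name, but only a
     * bounded number of times per name, however often the routine is
     * re-entered for the same query. */
    static void fill_from_cache(struct ns_entry* list)
    {
        struct ns_entry* ns;
        for(ns = list; ns; ns = ns->next) {
            if(ns->cache_lookup_count > NAME_CACHELOOKUP_MAX)
                continue;   /* lookup budget for this name is spent */
            ns->cache_lookup_count++;
            /* ... rrset cache lookup for ns->name goes here ... */
        }
    }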

diff --git a/unbound-1.16.2/iterator/iter_delegpt.c b/unbound-1.16.2/iterator/iter_delegpt.c
index 4bffa1b..fd07aaa 100644
--- a/unbound-1.16.2/iterator/iter_delegpt.c
+++ b/unbound-1.16.2/iterator/iter_delegpt.c
@@ -78,6 +78,7 @@ struct delegpt* delegpt_copy(struct delegpt* dp, struct regional* region)
 		if(!delegpt_add_ns(copy, region, ns->name, ns->lame,
 			ns->tls_auth_name, ns->port))
 			return NULL;
+		copy->nslist->cache_lookup_count = ns->cache_lookup_count;
 		copy->nslist->resolved = ns->resolved;
 		copy->nslist->got4 = ns->got4;
 		copy->nslist->got6 = ns->got6;
@@ -121,6 +122,7 @@ delegpt_add_ns(struct delegpt* dp, struct regional* region, uint8_t* name,
 	ns->namelen = len;
 	dp->nslist = ns;
 	ns->name = regional_alloc_init(region, name, ns->namelen);
+	ns->cache_lookup_count = 0;
 	ns->resolved = 0;
 	ns->got4 = 0;
 	ns->got6 = 0;
@@ -620,6 +622,7 @@ int delegpt_add_ns_mlc(struct delegpt* dp, uint8_t* name, uint8_t lame,
 	}
 	ns->next = dp->nslist;
 	dp->nslist = ns;
+	ns->cache_lookup_count = 0;
 	ns->resolved = 0;
 	ns->got4 = 0;
 	ns->got6 = 0;
diff --git a/unbound-1.16.2/iterator/iter_delegpt.h b/unbound-1.16.2/iterator/iter_delegpt.h
index 62c8edc..586597a 100644
--- a/unbound-1.16.2/iterator/iter_delegpt.h
+++ b/unbound-1.16.2/iterator/iter_delegpt.h
@@ -101,6 +101,8 @@ struct delegpt_ns {
 	uint8_t* name;
 	/** length of name */
 	size_t namelen;
+	/** number of cache lookups for the name */
+	int cache_lookup_count;
 	/** 
 	 * If the name has been resolved. false if not queried for yet.
 	 * true if the A, AAAA queries have been generated.
diff --git a/unbound-1.16.2/iterator/iter_utils.c b/unbound-1.16.2/iterator/iter_utils.c
index 3e13e59..56b184a 100644
--- a/unbound-1.16.2/iterator/iter_utils.c
+++ b/unbound-1.16.2/iterator/iter_utils.c
@@ -1209,6 +1209,9 @@ int iter_lookup_parent_glue_from_cache(struct module_env* env,
 	struct delegpt_ns* ns;
 	size_t num = delegpt_count_targets(dp);
 	for(ns = dp->nslist; ns; ns = ns->next) {
+		if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE)
+			continue;
+		ns->cache_lookup_count++;
 		/* get cached parentside A */
 		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
 			ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
diff --git a/unbound-1.16.2/iterator/iter_utils.h b/unbound-1.16.2/iterator/iter_utils.h
index 8583fde..850be96 100644
--- a/unbound-1.16.2/iterator/iter_utils.h
+++ b/unbound-1.16.2/iterator/iter_utils.h
@@ -62,6 +62,15 @@ struct ub_packed_rrset_key;
 struct module_stack;
 struct outside_network;
 
+/* Max number of lookups in the cache for target nameserver names.
+ * This stops, for large delegations, N*N lookups in the cache. */
+#define ITERATOR_NAME_CACHELOOKUP_MAX	3
+/* Max number of lookups in the cache for parent-side glue for nameserver
+ * names. This stops, for large delegations, N*N lookups in the cache.
+ * It is a little larger than the non-parent-side max, so it allows a couple
+ * of extra lookups of parent-side glue. */
+#define ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE	5
+
 /**
  * Process config options and set iterator module state.
  * Sets default values if no config is found.
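
To make the N*N figure above concrete: a malicious zone can delegate to, say,
40 NS names that never resolve to an address (the 40 is only an illustration).
Without a cap, each time the iterator goes back to the cache for more targets
it rescans the whole list, and the attack can make it do so on the order of
once per name, roughly 40 × 40 = 1600 rrset-cache lookups for a single client
query. With the per-name counters, each name is consulted only a bounded number
of times (the 3 and 5 caps above), so the total work stays linear in the number
of names rather than quadratic in the number of retries.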
diff --git a/unbound-1.16.2/iterator/iterator.c b/unbound-1.16.2/iterator/iterator.c
index 25e5cfe..da9b799 100644
--- a/unbound-1.16.2/iterator/iterator.c
+++ b/unbound-1.16.2/iterator/iterator.c
@@ -1218,6 +1218,15 @@ generate_dnskey_prefetch(struct module_qstate* qstate,
 		(qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
 		return;
 	}
+	/* We do not generate this prefetch when the query list is full;
+	 * the query is fetched, if needed, when the validator wants it.
+	 * At that time the validator spawns the lookup and waits for it.
+	 * That way there is only one extra state using cpu and a socket,
+	 * the spawned one, while this one waits, instead of several at
+	 * once as there would be if we created the lookup here. This
+	 * keeps the total load down, and the query still resolves. */
+	if(mesh_jostle_exceeded(qstate->env->mesh))
+		return;
 
 	/* if the DNSKEY is in the cache this lookup will stop quickly */
 	log_nametypeclass(VERB_ALGO, "schedule dnskey prefetch", 
@@ -1911,6 +1920,14 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
 				return 0;
 			}
 			query_count++;
+			/* If the mesh query list is full, exit the loop here.
+			 * This makes the routine spawn one query at a time,
+			 * so the query state load does not increase further:
+			 * the spawned state uses cpu and a socket while this
+			 * state waits for that spawned state. Next time we
+			 * can look up further targets. */
+			if(mesh_jostle_exceeded(qstate->env->mesh))
+				break;
 		}
 		/* Send the A request. */
 		if(ie->supports_ipv4 &&
@@ -1925,6 +1942,9 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
 				return 0;
 			}
 			query_count++;
+			/* If the mesh query list is full, exit the loop. */
+			if(mesh_jostle_exceeded(qstate->env->mesh))
+				break;
 		}
 
 		/* mark this target as in progress. */
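
As a concrete illustration of the comments in this hunk (the count of 10 names
is hypothetical): for a delegation with 10 still-unresolved NS names, one pass
through this loop could previously spawn an AAAA and an A subquery per name, up
to about 20 address lookups at once. With the new checks, once the mesh query
list is full the loop breaks right after the query it has just spawned, this
state waits for that single answer, and the remaining names are only looked up
on later passes if they are still needed.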
@@ -2085,6 +2105,15 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
 			}
 			ns->done_pside6 = 1;
 			query_count++;
+			if(mesh_jostle_exceeded(qstate->env->mesh)) {
+				/* Wait for the lookup; do not spawn multiple
+				 * lookups at a time. */
+				verbose(VERB_ALGO, "try parent-side glue lookup");
+				iq->num_target_queries += query_count;
+				target_count_increase(iq, query_count);
+				qstate->ext_state[id] = module_wait_subquery;
+				return 0;
+			}
 		}
 		if(ie->supports_ipv4 && !ns->done_pside4) {
 			/* Send the A request. */
@@ -2560,7 +2589,12 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
 	if(iq->depth < ie->max_dependency_depth
 		&& iq->num_target_queries == 0
 		&& (!iq->target_count || iq->target_count[TARGET_COUNT_NX]==0)
-		&& iq->sent_count < TARGET_FETCH_STOP) {
+		&& iq->sent_count < TARGET_FETCH_STOP
+		/* if the mesh query list is full, then do not waste cpu
+		 * and sockets to fetch promiscuous targets. They can be
+		 * looked up when needed. */
+		&& !mesh_jostle_exceeded(qstate->env->mesh)
+		) {
 		tf_policy = ie->target_fetch_policy[iq->depth];
 	}
 
diff --git a/unbound-1.16.2/services/cache/dns.c b/unbound-1.16.2/services/cache/dns.c
index 6bca8d8..b6e5697 100644
--- a/unbound-1.16.2/services/cache/dns.c
+++ b/unbound-1.16.2/services/cache/dns.c
@@ -404,6 +404,9 @@ cache_fill_missing(struct module_env* env, uint16_t qclass,
 	struct ub_packed_rrset_key* akey;
 	time_t now = *env->now;
 	for(ns = dp->nslist; ns; ns = ns->next) {
+		if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX)
+			continue;
+		ns->cache_lookup_count++;
 		akey = rrset_cache_lookup(env->rrset_cache, ns->name, 
 			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
 		if(akey) {
diff --git a/unbound-1.16.2/services/mesh.c b/unbound-1.16.2/services/mesh.c
index 30bcf7c..2a41194 100644
--- a/unbound-1.16.2/services/mesh.c
+++ b/unbound-1.16.2/services/mesh.c
@@ -2240,3 +2240,10 @@ mesh_serve_expired_callback(void* arg)
 		mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv);
 	}
 }
+
+int mesh_jostle_exceeded(struct mesh_area* mesh)
+{
+	if(mesh->all.count < mesh->max_reply_states)
+		return 0;
+	return 1;
+}
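
The new check compares the total number of query states in the mesh
(mesh->all.count, which includes subqueries spawned internally by modules)
against max_reply_states, the per-thread limit on simultaneous queries that
corresponds to the num-queries-per-thread setting. A small model of the
comparison and of the caller pattern used in the iterator hunks above, with
simplified stand-in types (struct mesh_model is not a real Unbound type):

    #include <stddef.h>

    /* Simplified model of the mesh counters involved; the real
     * struct mesh_area has many more members. */
    struct mesh_model {
        size_t all_count;         /* every query state, incl. spawned
                                   * subqueries from modules */
        size_t num_reply_states;  /* only states with a client waiting;
                                   * kept here for contrast, not used */
        size_t max_reply_states;  /* configured per-thread limit */
    };

    /* Same shape as mesh_jostle_exceeded(): the comparison is made
     * against all states on purpose, so internally spawned lookups
     * count towards the limit as well. */
    static int jostle_exceeded(const struct mesh_model* m)
    {
        return m->all_count >= m->max_reply_states;
    }

    /* Caller pattern from the iterator changes: optional work, like
     * prefetching or promiscuous target fetching, is skipped while
     * the query list is full. */
    static int may_spawn_optional_lookup(const struct mesh_model* m)
    {
        return !jostle_exceeded(m);
    }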
diff --git a/unbound-1.16.2/services/mesh.h b/unbound-1.16.2/services/mesh.h
index 3be9b63..25121a6 100644
--- a/unbound-1.16.2/services/mesh.h
+++ b/unbound-1.16.2/services/mesh.h
@@ -685,4 +685,15 @@ struct dns_msg*
 mesh_serve_expired_lookup(struct module_qstate* qstate,
 	struct query_info* lookup_qinfo);
 
+/**
+ * See if the mesh has space for more queries. Queries can still be
+ * allocated anyway; this only checks against the configured space.
+ * @param mesh: mesh area.
+ * @return true if the query list is full.
+ * 	It counts all queries, not just the reply states that have a
+ * 	client address, so spawned queries, created by the iterator or
+ * 	other modules, count as well.
+ */
+int mesh_jostle_exceeded(struct mesh_area* mesh);
+
 #endif /* SERVICES_MESH_H */
-- 
2.37.3