diff --git a/SOURCES/root.anchor b/SOURCES/root.anchor
index 4a5f11e..c78ee03 100644
--- a/SOURCES/root.anchor
+++ b/SOURCES/root.anchor
@@ -1,2 +1 @@
 . 172800 IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU= ;{id = 20326 (ksk), size = 2048b}
-. 98799 IN DNSKEY 257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjFFVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoXbfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaDX6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpzW5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relSQageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulqQxA+Uk1ihz0= ;{id = 19036 (ksk), size = 2048b}
diff --git a/SOURCES/root.key b/SOURCES/root.key
index 077ca98..a0b1bef 100644
@@ -2,7 +2,4 @@
 ; // named, unbound, et. For libunbound, use ub_ctx_trustedkeys() to load this
 trusted-keys {
 "." 257 3 8 "AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU="; // key id = 20326
-
-"." 257 3 8 "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjFFVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoXbfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaDX6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpzW5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relSQageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulqQxA+Uk1ihz0="; // key id = 19036
-
 };
diff --git a/SOURCES/tmpfiles-unbound.conf b/SOURCES/tmpfiles-unbound.conf
index d625589..bb88f01 100644
@@ -1 +1 @@
-D /var/run/unbound 0755 unbound unbound -
+D /run/unbound 0755 unbound unbound -
diff --git a/SOURCES/unbound-1.7.3-DNS-over-TLS-memory-leak.patch b/SOURCES/unbound-1.7.3-DNS-over-TLS-memory-leak.patch
new file mode 100644
index 0000000..9823850
--- /dev/null
+++ b/SOURCES/unbound-1.7.3-DNS-over-TLS-memory-leak.patch
@@ -0,0 +1,36 @@
+From 377d5b426a30fc915cf7905786f93c0ec89845b7 Mon Sep 17 00:00:00 2001
+From: Wouter Wijngaards
+Date: Tue, 25 Sep 2018 09:01:13 +0000
+Subject: [PATCH] - Add SSL cleanup for tcp timeout.
+ +git-svn-id: file:///svn/unbound/trunk@4915 be551aaa-1e26-0410-a405-d3ace91eadb9 +--- + services/outside_network.c | 11 +++++++++++ + 1 files changed, 9 insertions(+) +diff --git a/services/outside_network.c b/services/outside_network.c +index 5700ef8..b52cdab 100644 +--- a/services/outside_network.c ++++ b/services/outside_network.c +@@ -373,6 +373,8 @@ outnet_tcp_take_into_use(struct waiting_tcp* w, uint8_t* pkt, size_t pkt_len) + if(!SSL_set1_host(pend->c->ssl, w->tls_auth_name)) { + log_err("SSL_set1_host failed"); + pend->c->fd = s; ++ SSL_free(pend->c->ssl); ++ pend->c->ssl = NULL; + comm_point_close(pend->c); + return 0; + } +@@ -1258,6 +1260,13 @@ outnet_tcptimer(void* arg) + } else { + /* it was in use */ + struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting; ++ if(pend->c->ssl) { ++#ifdef HAVE_SSL ++ SSL_shutdown(pend->c->ssl); ++ SSL_free(pend->c->ssl); ++ pend->c->ssl = NULL; ++#endif ++ } + comm_point_close(pend->c); + pend->query = NULL; + pend->next_free = outnet->tcp_free; diff --git a/SOURCES/unbound-1.7.3-amplifying-an-incoming-query.patch b/SOURCES/unbound-1.7.3-amplifying-an-incoming-query.patch new file mode 100644 index 0000000..a3f9aef --- /dev/null +++ b/SOURCES/unbound-1.7.3-amplifying-an-incoming-query.patch @@ -0,0 +1,944 @@ +diff --git a/iterator/iter_delegpt.c b/iterator/iter_delegpt.c +index f88b3e1..522e0e9 100644 +--- a/iterator/iter_delegpt.c ++++ b/iterator/iter_delegpt.c +@@ -84,7 +84,7 @@ struct delegpt* delegpt_copy(struct delegpt* dp, struct regional* region) + } + for(a = dp->target_list; a; a = a->next_target) { + if(!delegpt_add_addr(copy, region, &a->addr, a->addrlen, +- a->bogus, a->lame, a->tls_auth_name)) ++ a->bogus, a->lame, a->tls_auth_name, NULL)) + return NULL; + } + return copy; +@@ -161,7 +161,7 @@ delegpt_find_addr(struct delegpt* dp, struct sockaddr_storage* addr, + int + delegpt_add_target(struct delegpt* dp, struct regional* region, + uint8_t* name, size_t namelen, struct sockaddr_storage* addr, +- socklen_t addrlen, uint8_t bogus, uint8_t lame) ++ socklen_t addrlen, uint8_t bogus, uint8_t lame, int* additions) + { + struct delegpt_ns* ns = delegpt_find_ns(dp, name, namelen); + log_assert(!dp->dp_type_mlc); +@@ -176,13 +176,14 @@ delegpt_add_target(struct delegpt* dp, struct regional* region, + if(ns->got4 && ns->got6) + ns->resolved = 1; + } +- return delegpt_add_addr(dp, region, addr, addrlen, bogus, lame, NULL); ++ return delegpt_add_addr(dp, region, addr, addrlen, bogus, lame, NULL, ++ additions); + } + + int + delegpt_add_addr(struct delegpt* dp, struct regional* region, + struct sockaddr_storage* addr, socklen_t addrlen, uint8_t bogus, +- uint8_t lame, char* tls_auth_name) ++ uint8_t lame, char* tls_auth_name, int* additions) + { + struct delegpt_addr* a; + log_assert(!dp->dp_type_mlc); +@@ -195,6 +196,9 @@ delegpt_add_addr(struct delegpt* dp, struct regional* region, + return 1; + } + ++ if(additions) ++ *additions = 1; ++ + a = (struct delegpt_addr*)regional_alloc(region, + sizeof(struct delegpt_addr)); + if(!a) +@@ -382,10 +386,10 @@ delegpt_from_message(struct dns_msg* msg, struct regional* region) + continue; + + if(ntohs(s->rk.type) == LDNS_RR_TYPE_A) { +- if(!delegpt_add_rrset_A(dp, region, s, 0)) ++ if(!delegpt_add_rrset_A(dp, region, s, 0, NULL)) + return NULL; + } else if(ntohs(s->rk.type) == LDNS_RR_TYPE_AAAA) { +- if(!delegpt_add_rrset_AAAA(dp, region, s, 0)) ++ if(!delegpt_add_rrset_AAAA(dp, region, s, 0, NULL)) + return NULL; + } + } +@@ -416,7 +420,7 @@ delegpt_rrset_add_ns(struct delegpt* dp, struct 
regional* region, + + int + delegpt_add_rrset_A(struct delegpt* dp, struct regional* region, +- struct ub_packed_rrset_key* ak, uint8_t lame) ++ struct ub_packed_rrset_key* ak, uint8_t lame, int* additions) + { + struct packed_rrset_data* d=(struct packed_rrset_data*)ak->entry.data; + size_t i; +@@ -432,7 +436,7 @@ delegpt_add_rrset_A(struct delegpt* dp, struct regional* region, + memmove(&sa.sin_addr, d->rr_data[i]+2, INET_SIZE); + if(!delegpt_add_target(dp, region, ak->rk.dname, + ak->rk.dname_len, (struct sockaddr_storage*)&sa, +- len, (d->security==sec_status_bogus), lame)) ++ len, (d->security==sec_status_bogus), lame, additions)) + return 0; + } + return 1; +@@ -440,7 +444,7 @@ delegpt_add_rrset_A(struct delegpt* dp, struct regional* region, + + int + delegpt_add_rrset_AAAA(struct delegpt* dp, struct regional* region, +- struct ub_packed_rrset_key* ak, uint8_t lame) ++ struct ub_packed_rrset_key* ak, uint8_t lame, int* additions) + { + struct packed_rrset_data* d=(struct packed_rrset_data*)ak->entry.data; + size_t i; +@@ -456,7 +460,7 @@ delegpt_add_rrset_AAAA(struct delegpt* dp, struct regional* region, + memmove(&sa.sin6_addr, d->rr_data[i]+2, INET6_SIZE); + if(!delegpt_add_target(dp, region, ak->rk.dname, + ak->rk.dname_len, (struct sockaddr_storage*)&sa, +- len, (d->security==sec_status_bogus), lame)) ++ len, (d->security==sec_status_bogus), lame, additions)) + return 0; + } + return 1; +@@ -464,20 +468,32 @@ delegpt_add_rrset_AAAA(struct delegpt* dp, struct regional* region, + + int + delegpt_add_rrset(struct delegpt* dp, struct regional* region, +- struct ub_packed_rrset_key* rrset, uint8_t lame) ++ struct ub_packed_rrset_key* rrset, uint8_t lame, int* additions) + { + if(!rrset) + return 1; + if(ntohs(rrset->rk.type) == LDNS_RR_TYPE_NS) + return delegpt_rrset_add_ns(dp, region, rrset, lame); + else if(ntohs(rrset->rk.type) == LDNS_RR_TYPE_A) +- return delegpt_add_rrset_A(dp, region, rrset, lame); ++ return delegpt_add_rrset_A(dp, region, rrset, lame, additions); + else if(ntohs(rrset->rk.type) == LDNS_RR_TYPE_AAAA) +- return delegpt_add_rrset_AAAA(dp, region, rrset, lame); ++ return delegpt_add_rrset_AAAA(dp, region, rrset, lame, additions); + log_warn("Unknown rrset type added to delegpt"); + return 1; + } + ++void delegpt_mark_neg(struct delegpt_ns* ns, uint16_t qtype) ++{ ++ if(ns) { ++ if(qtype == LDNS_RR_TYPE_A) ++ ns->got4 = 2; ++ else if(qtype == LDNS_RR_TYPE_AAAA) ++ ns->got6 = 2; ++ if(ns->got4 && ns->got6) ++ ns->resolved = 1; ++ } ++} ++ + void delegpt_add_neg_msg(struct delegpt* dp, struct msgreply_entry* msg) + { + struct reply_info* rep = (struct reply_info*)msg->entry.data; +@@ -487,14 +503,7 @@ void delegpt_add_neg_msg(struct delegpt* dp, struct msgreply_entry* msg) + if(FLAGS_GET_RCODE(rep->flags) != 0 || rep->an_numrrsets == 0) { + struct delegpt_ns* ns = delegpt_find_ns(dp, msg->key.qname, + msg->key.qname_len); +- if(ns) { +- if(msg->key.qtype == LDNS_RR_TYPE_A) +- ns->got4 = 1; +- else if(msg->key.qtype == LDNS_RR_TYPE_AAAA) +- ns->got6 = 1; +- if(ns->got4 && ns->got6) +- ns->resolved = 1; +- } ++ delegpt_mark_neg(ns, msg->key.qtype); + } + } + +diff --git a/iterator/iter_delegpt.h b/iterator/iter_delegpt.h +index 354bd61..3aded22 100644 +--- a/iterator/iter_delegpt.h ++++ b/iterator/iter_delegpt.h +@@ -104,9 +104,10 @@ struct delegpt_ns { + * and marked true if got4 and got6 are both true. 
+ */ + int resolved; +- /** if the ipv4 address is in the delegpt */ ++ /** if the ipv4 address is in the delegpt, 0=not, 1=yes 2=negative, ++ * negative means it was done, but no content. */ + uint8_t got4; +- /** if the ipv6 address is in the delegpt */ ++ /** if the ipv6 address is in the delegpt, 0=not, 1=yes 2=negative */ + uint8_t got6; + /** + * If the name is parent-side only and thus dispreferred. +@@ -213,11 +214,12 @@ int delegpt_rrset_add_ns(struct delegpt* dp, struct regional* regional, + * @param addrlen: the length of addr. + * @param bogus: security status for the address, pass true if bogus. + * @param lame: address is lame. ++ * @param additions: will be set to 1 if a new address is added + * @return false on error. + */ + int delegpt_add_target(struct delegpt* dp, struct regional* regional, + uint8_t* name, size_t namelen, struct sockaddr_storage* addr, +- socklen_t addrlen, uint8_t bogus, uint8_t lame); ++ socklen_t addrlen, uint8_t bogus, uint8_t lame, int* additions); + + /** + * Add A RRset to delegpt. +@@ -225,10 +227,11 @@ int delegpt_add_target(struct delegpt* dp, struct regional* regional, + * @param regional: where to allocate the info. + * @param rrset: RRset A to add. + * @param lame: rrset is lame, disprefer it. ++ * @param additions: will be set to 1 if a new address is added + * @return 0 on alloc error. + */ + int delegpt_add_rrset_A(struct delegpt* dp, struct regional* regional, +- struct ub_packed_rrset_key* rrset, uint8_t lame); ++ struct ub_packed_rrset_key* rrset, uint8_t lame, int* additions); + + /** + * Add AAAA RRset to delegpt. +@@ -236,10 +239,11 @@ int delegpt_add_rrset_A(struct delegpt* dp, struct regional* regional, + * @param regional: where to allocate the info. + * @param rrset: RRset AAAA to add. + * @param lame: rrset is lame, disprefer it. ++ * @param additions: will be set to 1 if a new address is added + * @return 0 on alloc error. + */ + int delegpt_add_rrset_AAAA(struct delegpt* dp, struct regional* regional, +- struct ub_packed_rrset_key* rrset, uint8_t lame); ++ struct ub_packed_rrset_key* rrset, uint8_t lame, int* additions); + + /** + * Add any RRset to delegpt. +@@ -248,10 +252,11 @@ int delegpt_add_rrset_AAAA(struct delegpt* dp, struct regional* regional, + * @param regional: where to allocate the info. + * @param rrset: RRset to add, NS, A, AAAA. + * @param lame: rrset is lame, disprefer it. ++ * @param additions: will be set to 1 if a new address is added + * @return 0 on alloc error. + */ + int delegpt_add_rrset(struct delegpt* dp, struct regional* regional, +- struct ub_packed_rrset_key* rrset, uint8_t lame); ++ struct ub_packed_rrset_key* rrset, uint8_t lame, int* additions); + + /** + * Add address to the delegation point. No servername is associated or checked. +@@ -262,11 +267,13 @@ int delegpt_add_rrset(struct delegpt* dp, struct regional* regional, + * @param bogus: if address is bogus. + * @param lame: if address is lame. + * @param tls_auth_name: TLS authentication name (or NULL). ++ * @param additions: will be set to 1 if a new address is added ++ * @return 0 on alloc error. + * @return false on error. + */ + int delegpt_add_addr(struct delegpt* dp, struct regional* regional, + struct sockaddr_storage* addr, socklen_t addrlen, +- uint8_t bogus, uint8_t lame, char* tls_auth_name); ++ uint8_t bogus, uint8_t lame, char* tls_auth_name, int* additions); + + /** + * Find NS record in name list of delegation point. 
+@@ -339,6 +346,14 @@ size_t delegpt_count_targets(struct delegpt* dp); + struct delegpt* delegpt_from_message(struct dns_msg* msg, + struct regional* regional); + ++/** ++* Mark negative return in delegation point for specific nameserver. ++* sets the got4 or got6 to negative, updates the ns->resolved. ++* @param ns: the nameserver in the delegpt. ++* @param qtype: A or AAAA (host order). ++*/ ++void delegpt_mark_neg(struct delegpt_ns* ns, uint16_t qtype); ++ + /** + * Add negative message to delegation point. + * @param dp: delegation point. +diff --git a/iterator/iter_scrub.c b/iterator/iter_scrub.c +index 12580dc..8230d17 100644 +--- a/iterator/iter_scrub.c ++++ b/iterator/iter_scrub.c +@@ -185,8 +185,9 @@ mark_additional_rrset(sldns_buffer* pkt, struct msg_parse* msg, + /** Get target name of a CNAME */ + static int + parse_get_cname_target(struct rrset_parse* rrset, uint8_t** sname, +- size_t* snamelen) ++ size_t* snamelen, sldns_buffer* pkt) + { ++ size_t oldpos, dlen; + if(rrset->rr_count != 1) { + struct rr_parse* sig; + verbose(VERB_ALGO, "Found CNAME rrset with " +@@ -204,6 +205,19 @@ parse_get_cname_target(struct rrset_parse* rrset, uint8_t** sname, + *sname = rrset->rr_first->ttl_data + sizeof(uint32_t) + + sizeof(uint16_t); /* skip ttl, rdatalen */ + *snamelen = rrset->rr_first->size - sizeof(uint16_t); ++ ++ if(rrset->rr_first->outside_packet) { ++ if(!dname_valid(*sname, *snamelen)) ++ return 0; ++ return 1; ++ } ++ oldpos = sldns_buffer_position(pkt); ++ sldns_buffer_set_position(pkt, (size_t)(*sname - sldns_buffer_begin(pkt))); ++ dlen = pkt_dname_len(pkt); ++ sldns_buffer_set_position(pkt, oldpos); ++ if(dlen == 0) ++ return 0; /* parse fail on the rdata name */ ++ *snamelen = dlen; + return 1; + } + +@@ -215,7 +229,7 @@ synth_cname(uint8_t* qname, size_t qnamelen, struct rrset_parse* dname_rrset, + /* we already know that sname is a strict subdomain of DNAME owner */ + uint8_t* dtarg = NULL; + size_t dtarglen; +- if(!parse_get_cname_target(dname_rrset, &dtarg, &dtarglen)) ++ if(!parse_get_cname_target(dname_rrset, &dtarg, &dtarglen, pkt)) + return 0; + log_assert(qnamelen > dname_rrset->dname_len); + /* DNAME from com. to net. with qname example.com. -> example.net. */ +@@ -372,7 +386,7 @@ scrub_normalize(sldns_buffer* pkt, struct msg_parse* msg, + /* check next cname */ + uint8_t* t = NULL; + size_t tlen = 0; +- if(!parse_get_cname_target(nx, &t, &tlen)) ++ if(!parse_get_cname_target(nx, &t, &tlen, pkt)) + return 0; + if(dname_pkt_compare(pkt, alias, t) == 0) { + /* it's OK and better capitalized */ +@@ -423,7 +437,7 @@ scrub_normalize(sldns_buffer* pkt, struct msg_parse* msg, + size_t tlen = 0; + if(synth_cname(sname, snamelen, nx, alias, + &aliaslen, pkt) && +- parse_get_cname_target(rrset, &t, &tlen) && ++ parse_get_cname_target(rrset, &t, &tlen, pkt) && + dname_pkt_compare(pkt, alias, t) == 0) { + /* the synthesized CNAME equals the + * current CNAME. 
This CNAME is the +@@ -442,7 +456,7 @@ scrub_normalize(sldns_buffer* pkt, struct msg_parse* msg, + } + + /* move to next name in CNAME chain */ +- if(!parse_get_cname_target(rrset, &sname, &snamelen)) ++ if(!parse_get_cname_target(rrset, &sname, &snamelen, pkt)) + return 0; + prev = rrset; + rrset = rrset->rrset_all_next; +diff --git a/iterator/iter_utils.c b/iterator/iter_utils.c +index 0a8f770..a107bee 100644 +--- a/iterator/iter_utils.c ++++ b/iterator/iter_utils.c +@@ -1008,7 +1008,7 @@ int iter_lookup_parent_glue_from_cache(struct module_env* env, + log_rrset_key(VERB_ALGO, "found parent-side", akey); + ns->done_pside4 = 1; + /* a negative-cache-element has no addresses it adds */ +- if(!delegpt_add_rrset_A(dp, region, akey, 1)) ++ if(!delegpt_add_rrset_A(dp, region, akey, 1, NULL)) + log_err("malloc failure in lookup_parent_glue"); + lock_rw_unlock(&akey->entry.lock); + } +@@ -1020,7 +1020,7 @@ int iter_lookup_parent_glue_from_cache(struct module_env* env, + log_rrset_key(VERB_ALGO, "found parent-side", akey); + ns->done_pside6 = 1; + /* a negative-cache-element has no addresses it adds */ +- if(!delegpt_add_rrset_AAAA(dp, region, akey, 1)) ++ if(!delegpt_add_rrset_AAAA(dp, region, akey, 1, NULL)) + log_err("malloc failure in lookup_parent_glue"); + lock_rw_unlock(&akey->entry.lock); + } +diff --git a/iterator/iterator.c b/iterator/iterator.c +index 58a9bff..a4ad319 100644 +--- a/iterator/iterator.c ++++ b/iterator/iterator.c +@@ -69,6 +69,8 @@ + #include "sldns/parseutil.h" + #include "sldns/sbuffer.h" + ++static void target_count_increase_nx(struct iter_qstate* iq, int num); ++ + int + iter_init(struct module_env* env, int id) + { +@@ -147,6 +149,7 @@ iter_new(struct module_qstate* qstate, int id) + iq->sent_count = 0; + iq->ratelimit_ok = 0; + iq->target_count = NULL; ++ iq->dp_target_count = 0; + iq->wait_priming_stub = 0; + iq->refetch_glue = 0; + iq->dnssec_expected = 0; +@@ -218,6 +221,7 @@ final_state(struct iter_qstate* iq) + static void + error_supers(struct module_qstate* qstate, int id, struct module_qstate* super) + { ++ struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id]; + struct iter_qstate* super_iq = (struct iter_qstate*)super->minfo[id]; + + if(qstate->qinfo.qtype == LDNS_RR_TYPE_A || +@@ -242,7 +246,11 @@ error_supers(struct module_qstate* qstate, int id, struct module_qstate* super) + super->region, super_iq->dp)) + log_err("out of memory adding missing"); + } ++ delegpt_mark_neg(dpns, qstate->qinfo.qtype); + dpns->resolved = 1; /* mark as failed */ ++ if((dpns->got4 == 2 || !ie->supports_ipv4) && ++ (dpns->got6 == 2 || !ie->supports_ipv6)) ++ target_count_increase_nx(super_iq, 1); + } + if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS) { + /* prime failed to get delegation */ +@@ -577,7 +585,7 @@ static void + target_count_create(struct iter_qstate* iq) + { + if(!iq->target_count) { +- iq->target_count = (int*)calloc(2, sizeof(int)); ++ iq->target_count = (int*)calloc(3, sizeof(int)); + /* if calloc fails we simply do not track this number */ + if(iq->target_count) + iq->target_count[0] = 1; +@@ -590,6 +598,15 @@ target_count_increase(struct iter_qstate* iq, int num) + target_count_create(iq); + if(iq->target_count) + iq->target_count[1] += num; ++ iq->dp_target_count++; ++} ++ ++static void ++target_count_increase_nx(struct iter_qstate* iq, int num) ++{ ++ target_count_create(iq); ++ if(iq->target_count) ++ iq->target_count[2] += num; + } + + /** +@@ -612,13 +629,15 @@ target_count_increase(struct iter_qstate* iq, int num) + * @param subq_ret: if newly 
allocated, the subquerystate, or NULL if it does + * not need initialisation. + * @param v: if true, validation is done on the subquery. ++ * @param detached: true if this qstate should not attach to the subquery + * @return false on error (malloc). + */ + static int + generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype, + uint16_t qclass, struct module_qstate* qstate, int id, + struct iter_qstate* iq, enum iter_state initial_state, +- enum iter_state finalstate, struct module_qstate** subq_ret, int v) ++ enum iter_state finalstate, struct module_qstate** subq_ret, int v, ++ int detached) + { + struct module_qstate* subq = NULL; + struct iter_qstate* subiq = NULL; +@@ -645,12 +664,24 @@ generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype, + valrec = 1; + } + +- /* attach subquery, lookup existing or make a new one */ +- fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub)); +- if(!(*qstate->env->attach_sub)(qstate, &qinf, qflags, prime, valrec, +- &subq)) { +- return 0; +- } ++ if(detached) { ++ struct mesh_state* sub = NULL; ++ fptr_ok(fptr_whitelist_modenv_add_sub( ++ qstate->env->add_sub)); ++ if(!(*qstate->env->add_sub)(qstate, &qinf, ++ qflags, prime, valrec, &subq, &sub)){ ++ return 0; ++ } ++ } ++ else { ++ /* attach subquery, lookup existing or make a new one */ ++ fptr_ok(fptr_whitelist_modenv_attach_sub( ++ qstate->env->attach_sub)); ++ if(!(*qstate->env->attach_sub)(qstate, &qinf, qflags, prime, ++ valrec, &subq)) { ++ return 0; ++ } ++ } + *subq_ret = subq; + if(subq) { + /* initialise the new subquery */ +@@ -672,6 +703,7 @@ generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype, + subiq->target_count = iq->target_count; + if(iq->target_count) + iq->target_count[0] ++; /* extra reference */ ++ subiq->dp_target_count = 0; + subiq->num_current_queries = 0; + subiq->depth = iq->depth+1; + outbound_list_init(&subiq->outlist); +@@ -715,7 +747,7 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id, + * the normal INIT state logic (which would cause an infloop). */ + if(!generate_sub_request((uint8_t*)"\000", 1, LDNS_RR_TYPE_NS, + qclass, qstate, id, iq, QUERYTARGETS_STATE, PRIME_RESP_STATE, +- &subq, 0)) { ++ &subq, 0, 0)) { + verbose(VERB_ALGO, "could not prime root"); + return 0; + } +@@ -805,7 +837,7 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id, + * redundant INIT state processing. 
*/ + if(!generate_sub_request(stub_dp->name, stub_dp->namelen, + LDNS_RR_TYPE_NS, qclass, qstate, id, iq, +- QUERYTARGETS_STATE, PRIME_RESP_STATE, &subq, 0)) { ++ QUERYTARGETS_STATE, PRIME_RESP_STATE, &subq, 0, 0)) { + verbose(VERB_ALGO, "could not prime stub"); + (void)error_response(qstate, id, LDNS_RCODE_SERVFAIL); + return 1; /* return 1 to make module stop, with error */ +@@ -976,7 +1008,7 @@ generate_a_aaaa_check(struct module_qstate* qstate, struct iter_qstate* iq, + if(!generate_sub_request(s->rk.dname, s->rk.dname_len, + ntohs(s->rk.type), ntohs(s->rk.rrset_class), + qstate, id, iq, +- INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) { ++ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1, 0)) { + verbose(VERB_ALGO, "could not generate addr check"); + return; + } +@@ -1020,7 +1052,7 @@ generate_ns_check(struct module_qstate* qstate, struct iter_qstate* iq, int id) + iq->dp->name, LDNS_RR_TYPE_NS, iq->qchase.qclass); + if(!generate_sub_request(iq->dp->name, iq->dp->namelen, + LDNS_RR_TYPE_NS, iq->qchase.qclass, qstate, id, iq, +- INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) { ++ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1, 0)) { + verbose(VERB_ALGO, "could not generate ns check"); + return; + } +@@ -1077,7 +1109,7 @@ generate_dnskey_prefetch(struct module_qstate* qstate, + iq->dp->name, LDNS_RR_TYPE_DNSKEY, iq->qchase.qclass); + if(!generate_sub_request(iq->dp->name, iq->dp->namelen, + LDNS_RR_TYPE_DNSKEY, iq->qchase.qclass, qstate, id, iq, +- INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0)) { ++ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0, 0)) { + /* we'll be slower, but it'll work */ + verbose(VERB_ALGO, "could not generate dnskey prefetch"); + return; +@@ -1251,6 +1283,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq, + iq->refetch_glue = 0; + iq->query_restart_count++; + iq->sent_count = 0; ++ iq->dp_target_count = 0; + sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region); + if(qstate->env->cfg->qname_minimisation) + iq->minimisation_state = INIT_MINIMISE_STATE; +@@ -1613,7 +1646,7 @@ generate_parentside_target_query(struct module_qstate* qstate, + { + struct module_qstate* subq; + if(!generate_sub_request(name, namelen, qtype, qclass, qstate, +- id, iq, INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0)) ++ id, iq, INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0, 0)) + return 0; + if(subq) { + struct iter_qstate* subiq = +@@ -1664,7 +1697,7 @@ generate_target_query(struct module_qstate* qstate, struct iter_qstate* iq, + { + struct module_qstate* subq; + if(!generate_sub_request(name, namelen, qtype, qclass, qstate, +- id, iq, INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0)) ++ id, iq, INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0, 0)) + return 0; + log_nametypeclass(VERB_QUERY, "new target", name, qtype, qclass); + return 1; +@@ -1703,6 +1736,14 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq, + "number of glue fetches %d", s, iq->target_count[1]); + return 0; + } ++ if(iq->dp_target_count > MAX_DP_TARGET_COUNT) { ++ char s[LDNS_MAX_DOMAINLEN+1]; ++ dname_str(qstate->qinfo.qname, s); ++ verbose(VERB_QUERY, "request %s has exceeded the maximum " ++ "number of glue fetches %d to a single delegation point", ++ s, iq->dp_target_count); ++ return 0; ++ } + + iter_mark_cycle_targets(qstate, iq->dp); + missing = (int)delegpt_count_missing_targets(iq->dp); +@@ -1815,7 +1856,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq, + for(a = p->target_list; a; a=a->next_target) { + (void)delegpt_add_addr(iq->dp, 
qstate->region, + &a->addr, a->addrlen, a->bogus, +- a->lame, a->tls_auth_name); ++ a->lame, a->tls_auth_name, NULL); + } + } + iq->dp->has_parent_side_NS = 1; +@@ -1832,6 +1873,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq, + iq->refetch_glue = 1; + iq->query_restart_count++; + iq->sent_count = 0; ++ iq->dp_target_count = 0; + if(qstate->env->cfg->qname_minimisation) + iq->minimisation_state = INIT_MINIMISE_STATE; + return next_state(iq, INIT_REQUEST_STATE); +@@ -1986,7 +2028,7 @@ processDSNSFind(struct module_qstate* qstate, struct iter_qstate* iq, int id) + iq->dsns_point, LDNS_RR_TYPE_NS, iq->qchase.qclass); + if(!generate_sub_request(iq->dsns_point, iq->dsns_point_len, + LDNS_RR_TYPE_NS, iq->qchase.qclass, qstate, id, iq, +- INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0)) { ++ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0, 0)) { + return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL); + } + +@@ -2039,7 +2081,14 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq, + "number of sends with %d", iq->sent_count); + return error_response(qstate, id, LDNS_RCODE_SERVFAIL); + } +- ++ if(iq->target_count && iq->target_count[2] > MAX_TARGET_NX) { ++ verbose(VERB_QUERY, "request has exceeded the maximum " ++ " number of nxdomain nameserver lookups with %d", ++ iq->target_count[2]); ++ errinf(qstate, "exceeded the maximum nameserver nxdomains"); ++ return error_response(qstate, id, LDNS_RCODE_SERVFAIL); ++ } ++ + /* Make sure we have a delegation point, otherwise priming failed + * or another failure occurred */ + if(!iq->dp) { +@@ -2139,12 +2188,41 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq, + iq->qinfo_out.qtype, iq->qinfo_out.qclass, + qstate->query_flags, qstate->region, + qstate->env->scratch, 0); +- if(msg && msg->rep->an_numrrsets == 0 +- && FLAGS_GET_RCODE(msg->rep->flags) == ++ if(msg && FLAGS_GET_RCODE(msg->rep->flags) == + LDNS_RCODE_NOERROR) + /* no need to send query if it is already +- * cached as NOERROR/NODATA */ ++ * cached as NOERROR */ + return 1; ++ if(msg && FLAGS_GET_RCODE(msg->rep->flags) == ++ LDNS_RCODE_NXDOMAIN && ++ qstate->env->need_to_validate && ++ qstate->env->cfg->harden_below_nxdomain) { ++ if(msg->rep->security == sec_status_secure) { ++ iq->response = msg; ++ return final_state(iq); ++ } ++ if(msg->rep->security == sec_status_unchecked) { ++ struct module_qstate* subq = NULL; ++ if(!generate_sub_request( ++ iq->qinfo_out.qname, ++ iq->qinfo_out.qname_len, ++ iq->qinfo_out.qtype, ++ iq->qinfo_out.qclass, ++ qstate, id, iq, ++ INIT_REQUEST_STATE, ++ FINISHED_STATE, &subq, 1, 1)) ++ verbose(VERB_ALGO, ++ "could not validate NXDOMAIN " ++ "response"); ++ } ++ } ++ if(msg && FLAGS_GET_RCODE(msg->rep->flags) == ++ LDNS_RCODE_NXDOMAIN) { ++ /* return and add a label in the next ++ * minimisation iteration. ++ */ ++ return 1; ++ } + } + } + if(iq->minimisation_state == SKIP_MINIMISE_STATE) { +@@ -2219,6 +2297,8 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq, + * generated query will immediately be discarded due to depth and + * that servfail is cached, which is not good as opportunism goes. 
*/ + if(iq->depth < ie->max_dependency_depth ++ && iq->num_target_queries == 0 ++ && (!iq->target_count || iq->target_count[2]==0) + && iq->sent_count < TARGET_FETCH_STOP) { + tf_policy = ie->target_fetch_policy[iq->depth]; + } +@@ -2256,6 +2336,7 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq, + iq->num_current_queries++; /* RespState decrements it*/ + iq->referral_count++; /* make sure we don't loop */ + iq->sent_count = 0; ++ iq->dp_target_count = 0; + iq->state = QUERY_RESP_STATE; + return 1; + } +@@ -2341,6 +2422,7 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq, + iq->num_current_queries++; /* RespState decrements it*/ + iq->referral_count++; /* make sure we don't loop */ + iq->sent_count = 0; ++ iq->dp_target_count = 0; + iq->state = QUERY_RESP_STATE; + return 1; + } +@@ -2607,7 +2689,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq, + /* Make subrequest to validate intermediate + * NXDOMAIN if harden-below-nxdomain is + * enabled. */ +- if(qstate->env->cfg->harden_below_nxdomain) { ++ if(qstate->env->cfg->harden_below_nxdomain && ++ qstate->env->need_to_validate) { + struct module_qstate* subq = NULL; + log_query_info(VERB_QUERY, + "schedule NXDOMAIN validation:", +@@ -2619,7 +2702,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq, + iq->response->qinfo.qclass, + qstate, id, iq, + INIT_REQUEST_STATE, +- FINISHED_STATE, &subq, 1)) ++ FINISHED_STATE, &subq, 1, 1)) + verbose(VERB_ALGO, + "could not validate NXDOMAIN " + "response"); +@@ -2702,6 +2785,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq, + /* Count this as a referral. */ + iq->referral_count++; + iq->sent_count = 0; ++ iq->dp_target_count = 0; + /* see if the next dp is a trust anchor, or a DS was sent + * along, indicating dnssec is expected for next zone */ + iq->dnssec_expected = iter_indicates_dnssec(qstate->env, +@@ -2776,6 +2860,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq, + iq->dsns_point = NULL; + iq->auth_zone_response = 0; + iq->sent_count = 0; ++ iq->dp_target_count = 0; + if(iq->minimisation_state != MINIMISE_STATE) + /* Only count as query restart when it is not an extra + * query as result of qname minimisation. */ +@@ -2964,7 +3049,7 @@ processPrimeResponse(struct module_qstate* qstate, int id) + if(!generate_sub_request(qstate->qinfo.qname, + qstate->qinfo.qname_len, qstate->qinfo.qtype, + qstate->qinfo.qclass, qstate, id, iq, +- INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) { ++ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1, 0)) { + verbose(VERB_ALGO, "could not generate prime check"); + } + generate_a_aaaa_check(qstate, iq, id); +@@ -2992,6 +3077,7 @@ static void + processTargetResponse(struct module_qstate* qstate, int id, + struct module_qstate* forq) + { ++ struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id]; + struct iter_qstate* iq = (struct iter_qstate*)qstate->minfo[id]; + struct iter_qstate* foriq = (struct iter_qstate*)forq->minfo[id]; + struct ub_packed_rrset_key* rrset; +@@ -3029,7 +3115,7 @@ processTargetResponse(struct module_qstate* qstate, int id, + log_rrset_key(VERB_ALGO, "add parentside glue to dp", + iq->pside_glue); + if(!delegpt_add_rrset(foriq->dp, forq->region, +- iq->pside_glue, 1)) ++ iq->pside_glue, 1, NULL)) + log_err("out of memory adding pside glue"); + } + +@@ -3040,6 +3126,7 @@ processTargetResponse(struct module_qstate* qstate, int id, + * response type was ANSWER. 
*/ + rrset = reply_find_answer_rrset(&iq->qchase, qstate->return_msg->rep); + if(rrset) { ++ int additions = 0; + /* if CNAMEs have been followed - add new NS to delegpt. */ + /* BTW. RFC 1918 says NS should not have got CNAMEs. Robust. */ + if(!delegpt_find_ns(foriq->dp, rrset->rk.dname, +@@ -3051,13 +3138,23 @@ processTargetResponse(struct module_qstate* qstate, int id, + } + /* if dpns->lame then set the address(es) lame too */ + if(!delegpt_add_rrset(foriq->dp, forq->region, rrset, +- dpns->lame)) ++ dpns->lame, &additions)) + log_err("out of memory adding targets"); ++ if(!additions) { ++ /* no new addresses, increase the nxns counter, like ++ * this could be a list of wildcards with no new ++ * addresses */ ++ target_count_increase_nx(foriq, 1); ++ } + verbose(VERB_ALGO, "added target response"); + delegpt_log(VERB_ALGO, foriq->dp); + } else { + verbose(VERB_ALGO, "iterator TargetResponse failed"); ++ delegpt_mark_neg(dpns, qstate->qinfo.qtype); + dpns->resolved = 1; /* fail the target */ ++ if((dpns->got4 == 2 || !ie->supports_ipv4) && ++ (dpns->got6 == 2 || !ie->supports_ipv6)) ++ target_count_increase_nx(foriq, 1); + } + } + +@@ -3228,7 +3325,7 @@ processCollectClass(struct module_qstate* qstate, int id) + qstate->qinfo.qname_len, qstate->qinfo.qtype, + c, qstate, id, iq, INIT_REQUEST_STATE, + FINISHED_STATE, &subq, +- (int)!(qstate->query_flags&BIT_CD))) { ++ (int)!(qstate->query_flags&BIT_CD), 0)) { + return error_response(qstate, id, + LDNS_RCODE_SERVFAIL); + } +diff --git a/iterator/iterator.h b/iterator/iterator.h +index 67ffeb1..4b325b5 100644 +--- a/iterator/iterator.h ++++ b/iterator/iterator.h +@@ -55,6 +55,11 @@ struct rbtree_type; + + /** max number of targets spawned for a query and its subqueries */ + #define MAX_TARGET_COUNT 64 ++/** max number of target lookups per qstate, per delegation point */ ++#define MAX_DP_TARGET_COUNT 16 ++/** max number of nxdomains allowed for target lookups for a query and ++ * its subqueries */ ++#define MAX_TARGET_NX 5 + /** max number of query restarts. Determines max number of CNAME chain. */ + #define MAX_RESTART_COUNT 8 + /** max number of referrals. Makes sure resolver does not run away */ +@@ -305,9 +310,14 @@ struct iter_qstate { + int sent_count; + + /** number of target queries spawned in [1], for this query and its +- * subqueries, the malloced-array is shared, [0] refcount. */ ++ * subqueries, the malloced-array is shared, [0] refcount. ++ * in [2] the number of nxdomains is counted. */ + int* target_count; + ++ /** number of target lookups per delegation point. Reset to 0 after ++ * receiving referral answer. Not shared with subqueries. 
*/ ++ int dp_target_count; ++ + /** if true, already tested for ratelimiting and passed the test */ + int ratelimit_ok; + +diff --git a/services/cache/dns.c b/services/cache/dns.c +index 35adc35..23ec68e 100644 +--- a/services/cache/dns.c ++++ b/services/cache/dns.c +@@ -271,7 +271,7 @@ find_add_addrs(struct module_env* env, uint16_t qclass, + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0); + if(akey) { +- if(!delegpt_add_rrset_A(dp, region, akey, 0)) { ++ if(!delegpt_add_rrset_A(dp, region, akey, 0, NULL)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } +@@ -291,7 +291,7 @@ find_add_addrs(struct module_env* env, uint16_t qclass, + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0); + if(akey) { +- if(!delegpt_add_rrset_AAAA(dp, region, akey, 0)) { ++ if(!delegpt_add_rrset_AAAA(dp, region, akey, 0, NULL)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } +@@ -325,7 +325,8 @@ cache_fill_missing(struct module_env* env, uint16_t qclass, + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0); + if(akey) { +- if(!delegpt_add_rrset_A(dp, region, akey, ns->lame)) { ++ if(!delegpt_add_rrset_A(dp, region, akey, ns->lame, ++ NULL)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } +@@ -345,7 +346,8 @@ cache_fill_missing(struct module_env* env, uint16_t qclass, + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0); + if(akey) { +- if(!delegpt_add_rrset_AAAA(dp, region, akey, ns->lame)) { ++ if(!delegpt_add_rrset_AAAA(dp, region, akey, ns->lame, ++ NULL)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } +diff --git a/util/data/dname.c b/util/data/dname.c +index c7360f7..b744f06 100644 +--- a/util/data/dname.c ++++ b/util/data/dname.c +@@ -231,17 +231,28 @@ int + dname_pkt_compare(sldns_buffer* pkt, uint8_t* d1, uint8_t* d2) + { + uint8_t len1, len2; ++ int count1 = 0, count2 = 0; + log_assert(pkt && d1 && d2); + len1 = *d1++; + len2 = *d2++; + while( len1 != 0 || len2 != 0 ) { + /* resolve ptrs */ + if(LABEL_IS_PTR(len1)) { ++ if((size_t)PTR_OFFSET(len1, *d1) ++ >= sldns_buffer_limit(pkt)) ++ return -1; ++ if(count1++ > MAX_COMPRESS_PTRS) ++ return -1; + d1 = sldns_buffer_at(pkt, PTR_OFFSET(len1, *d1)); + len1 = *d1++; + continue; + } + if(LABEL_IS_PTR(len2)) { ++ if((size_t)PTR_OFFSET(len2, *d2) ++ >= sldns_buffer_limit(pkt)) ++ return 1; ++ if(count2++ > MAX_COMPRESS_PTRS) ++ return 1; + d2 = sldns_buffer_at(pkt, PTR_OFFSET(len2, *d2)); + len2 = *d2++; + continue; +@@ -300,12 +311,19 @@ dname_pkt_hash(sldns_buffer* pkt, uint8_t* dname, hashvalue_type h) + uint8_t labuf[LDNS_MAX_LABELLEN+1]; + uint8_t lablen; + int i; ++ int count = 0; + + /* preserve case of query, make hash label by label */ + lablen = *dname++; + while(lablen) { + if(LABEL_IS_PTR(lablen)) { + /* follow pointer */ ++ if((size_t)PTR_OFFSET(lablen, *dname) ++ >= sldns_buffer_limit(pkt)) ++ return h; ++ if(count++ > MAX_COMPRESS_PTRS) ++ return h; ++ + dname = sldns_buffer_at(pkt, PTR_OFFSET(lablen, *dname)); + lablen = *dname++; + continue; +@@ -333,6 +351,9 @@ void dname_pkt_copy(sldns_buffer* pkt, uint8_t* to, uint8_t* dname) + while(lablen) { + if(LABEL_IS_PTR(lablen)) { + /* follow pointer */ ++ if((size_t)PTR_OFFSET(lablen, *dname) ++ >= sldns_buffer_limit(pkt)) ++ return; + dname = sldns_buffer_at(pkt, PTR_OFFSET(lablen, *dname)); + lablen = *dname++; + continue; +@@ -357,6 +378,7 @@ void 
dname_pkt_copy(sldns_buffer* pkt, uint8_t* to, uint8_t* dname) + void dname_print(FILE* out, struct sldns_buffer* pkt, uint8_t* dname) + { + uint8_t lablen; ++ int count = 0; + if(!out) out = stdout; + if(!dname) return; + +@@ -370,6 +392,15 @@ void dname_print(FILE* out, struct sldns_buffer* pkt, uint8_t* dname) + fputs("??compressionptr??", out); + return; + } ++ if((size_t)PTR_OFFSET(lablen, *dname) ++ >= sldns_buffer_limit(pkt)) { ++ fputs("??compressionptr??", out); ++ return; ++ } ++ if(count++ > MAX_COMPRESS_PTRS) { ++ fputs("??compressionptr??", out); ++ return; ++ } + dname = sldns_buffer_at(pkt, PTR_OFFSET(lablen, *dname)); + lablen = *dname++; + continue; +diff --git a/util/data/msgparse.c b/util/data/msgparse.c +index 13cad8a..c8a5384 100644 +--- a/util/data/msgparse.c ++++ b/util/data/msgparse.c +@@ -55,7 +55,11 @@ smart_compare(sldns_buffer* pkt, uint8_t* dnow, + { + if(LABEL_IS_PTR(*dnow)) { + /* ptr points to a previous dname */ +- uint8_t* p = sldns_buffer_at(pkt, PTR_OFFSET(dnow[0], dnow[1])); ++ uint8_t* p; ++ if((size_t)PTR_OFFSET(dnow[0], dnow[1]) ++ >= sldns_buffer_limit(pkt)) ++ return -1; ++ p = sldns_buffer_at(pkt, PTR_OFFSET(dnow[0], dnow[1])); + if( p == dprfirst || p == dprlast ) + return 0; + /* prev dname is also a ptr, both ptrs are the same. */ diff --git a/SOURCES/unbound-1.7.3-auth-callback.patch b/SOURCES/unbound-1.7.3-auth-callback.patch new file mode 100644 index 0000000..57a8922 --- /dev/null +++ b/SOURCES/unbound-1.7.3-auth-callback.patch @@ -0,0 +1,65 @@ +--- a/services/authzone.c 2018-06-14 09:09:01.000000000 +0200 ++++ b/services/authzone.c 2020-04-16 18:55:50.806693241 +0200 +@@ -5139,7 +5139,7 @@ + log_assert(xfr->task_transfer); + lock_basic_lock(&xfr->lock); + env = xfr->task_transfer->env; +- if(env->outnet->want_to_quit) { ++ if(!env || env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return; /* stop on quit */ + } +@@ -5558,7 +5558,7 @@ + log_assert(xfr->task_transfer); + lock_basic_lock(&xfr->lock); + env = xfr->task_transfer->env; +- if(env->outnet->want_to_quit) { ++ if(!env || env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return 0; /* stop on quit */ + } +@@ -5619,7 +5619,7 @@ + log_assert(xfr->task_transfer); + lock_basic_lock(&xfr->lock); + env = xfr->task_transfer->env; +- if(env->outnet->want_to_quit) { ++ if(!env || env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return 0; /* stop on quit */ + } +@@ -5798,7 +5798,7 @@ + log_assert(xfr->task_probe); + lock_basic_lock(&xfr->lock); + env = xfr->task_probe->env; +- if(env->outnet->want_to_quit) { ++ if(!env || env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return; /* stop on quit */ + } +@@ -5829,7 +5829,7 @@ + log_assert(xfr->task_probe); + lock_basic_lock(&xfr->lock); + env = xfr->task_probe->env; +- if(env->outnet->want_to_quit) { ++ if(!env || env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return 0; /* stop on quit */ + } +@@ -6030,7 +6030,7 @@ + log_assert(xfr->task_probe); + lock_basic_lock(&xfr->lock); + env = xfr->task_probe->env; +- if(env->outnet->want_to_quit) { ++ if(!env || env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return; /* stop on quit */ + } +@@ -6089,7 +6089,7 @@ + log_assert(xfr->task_nextprobe); + lock_basic_lock(&xfr->lock); + env = xfr->task_nextprobe->env; +- if(env->outnet->want_to_quit) { ++ if(!env || env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return; /* stop on quit */ + } diff --git a/SOURCES/unbound-1.7.3-ksk-2010-revoked.patch 
b/SOURCES/unbound-1.7.3-ksk-2010-revoked.patch
new file mode 100644
index 0000000..a01109c
--- /dev/null
+++ b/SOURCES/unbound-1.7.3-ksk-2010-revoked.patch
@@ -0,0 +1,14 @@
+diff --git a/smallapp/unbound-anchor.c b/smallapp/unbound-anchor.c
+index 2bf5b3ab..a30523c7 100644
+--- a/smallapp/unbound-anchor.c
++++ b/smallapp/unbound-anchor.c
+@@ -246,9 +246,7 @@ get_builtin_ds(void)
+ 	return
+ /* The anchors must start on a new line with ". IN DS and end with \n"[;]
+  * because the makedist script greps on the source here */
+-/* anchor 19036 is from 2010 */
+ /* anchor 20326 is from 2017 */
+-". IN DS 19036 8 2 49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5\n"
+ ". IN DS 20326 8 2 E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D\n";
+ }
+ 
diff --git a/SOURCES/unbound.conf b/SOURCES/unbound.conf
index 2de6b64..5efe0d0 100644
--- a/SOURCES/unbound.conf
+++ b/SOURCES/unbound.conf
@@ -334,7 +334,7 @@ server:
 	# log-replies: no
 
 	# the pid file. Can be an absolute path outside of chroot/work dir.
-	pidfile: "/var/run/unbound/unbound.pid"
+	pidfile: "/run/unbound/unbound.pid"
 
 	# file to read root hints from.
 	# get one from https://www.internic.net/domain/named.cache
diff --git a/SPECS/unbound.spec b/SPECS/unbound.spec
index 351185a..9a37167 100644
--- a/SPECS/unbound.spec
+++ b/SPECS/unbound.spec
@@ -34,7 +34,7 @@
 Summary: Validating, recursive, and caching DNS(SEC) resolver
 Name: unbound
 Version: 1.7.3
-Release: 10%{?extra_version:.%{extra_version}}%{?dist}
+Release: 14%{?extra_version:.%{extra_version}}%{?dist}
 License: BSD
 Url: https://www.unbound.net/
 Source: https://www.unbound.net/downloads/%{name}-%{version}%{?extra_version}.tar.gz
@@ -61,6 +61,10 @@ Patch4: unbound-1.7.3-anchor-fallback.patch
 Patch5: unbound-1.7.3-host-any.patch
 Patch6: unbound-1.7.3-use-basic-lock.patch
 Patch7: unbound-1.7.3-ipsec-hook.patch
+Patch8: unbound-1.7.3-auth-callback.patch
+Patch9: unbound-1.7.3-ksk-2010-revoked.patch
+Patch10: unbound-1.7.3-DNS-over-TLS-memory-leak.patch
+Patch11: unbound-1.7.3-amplifying-an-incoming-query.patch
 
 BuildRequires: gcc, make
 BuildRequires: flex, openssl-devel
@@ -162,6 +166,10 @@ pushd %{pkgname}
 %patch5 -p1 -b .host-any
 %patch6 -p1 -b .use-basic-lock
 %patch7 -p1 -b .ipsec-hook
+%patch8 -p1 -b .auth-callback
+%patch9 -p1 -b .ksk-2010-revoked
+%patch10 -p1 -b .DNS-over-TLS-memory-leak
+%patch11 -p1 -b .amplifying-an-incoming-query
 
 # only for snapshots
 # autoreconf -iv
@@ -433,6 +441,27 @@ popd
 %attr(0644,root,root) %config %{_sysconfdir}/%{name}/root.key
 
 %changelog
+* Thu May 28 2020 Anna Khaitovich - 1.7.3-14
+- Fix unbound-1.7.3-amplifying-an-incoming-query.patch patch
+- Resolves: rhbz#1839178 (CVE-2020-12662)
+
+* Mon May 25 2020 Anna Khaitovich - 1.7.3-13
+- Fix two previous patches and add missing patch lines to %%prep
+- Fix amplifying an incoming query into a large number of queries directed to a target
+- Resolves: rhbz#1839178 (CVE-2020-12662)
+
+* Tue Apr 21 2020 Anna Khaitovich - 1.7.3-12
+- Remove KSK-2010 from configuration files
+- Resolves: rhbz#1665502
+- Replace legacy directory /var/run/ with /run
+- Resolves: rhbz#1766463
+- Resolves: rhbz#1805978
+- Fix memory leak when DNS over TLS forwarding is configured
+- Resolves: rhbz#1819870
+
+* Thu Apr 16 2020 Artem Egorenkov - 1.7.3-11
+- Resolves bz1818761. unbound crash fixed.
+
 * Tue Dec 10 2019 Tomas Korbar - 1.7.3-10
 - Secure ipsec mode (#1772061)
 - CVE-2019-18934