diff --git a/SOURCES/bind-9.11-CVE-2023-2828.patch b/SOURCES/bind-9.11-CVE-2023-2828.patch
new file mode 100644
index 0000000..fa5f39d
--- /dev/null
+++ b/SOURCES/bind-9.11-CVE-2023-2828.patch
@@ -0,0 +1,154 @@
+diff -up bind-9.11.4-P2/lib/dns/rbtdb.c.orig bind-9.11.4-P2/lib/dns/rbtdb.c
+--- bind-9.11.4-P2/lib/dns/rbtdb.c.orig	2023-07-03 13:03:49.462352864 +0200
++++ bind-9.11.4-P2/lib/dns/rbtdb.c	2023-07-03 13:05:32.916227615 +0200
+@@ -793,7 +793,7 @@ static void update_header(dns_rbtdb_t *r
+ static void expire_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
+ 			  isc_boolean_t tree_locked, expire_t reason);
+ static void overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
+-			  isc_stdtime_t now, isc_boolean_t tree_locked);
++			  size_t purgesize, isc_boolean_t tree_locked);
+ static isc_result_t resign_insert(dns_rbtdb_t *rbtdb, int idx,
+ 				  rdatasetheader_t *newheader);
+ static void resign_delete(dns_rbtdb_t *rbtdb, rbtdb_version_t *version,
+@@ -6745,6 +6745,17 @@ addclosest(dns_rbtdb_t *rbtdb, rdataseth
+ 
+ static dns_dbmethods_t zone_methods;
+ 
++static size_t
++rdataset_size(rdatasetheader_t *header) {
++	if (!NONEXISTENT(header)) {
++		return (dns_rdataslab_size((unsigned char *)header,
++					   sizeof(*header)));
++	}
++
++	return (sizeof(*header));
++}
++
++
+ static isc_result_t
+ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
+ 	    isc_stdtime_t now, dns_rdataset_t *rdataset, unsigned int options,
+@@ -6885,7 +6896,8 @@ addrdataset(dns_db_t *db, dns_dbnode_t *
+ 	}
+ 
+ 	if (cache_is_overmem)
+-		overmem_purge(rbtdb, rbtnode->locknum, now, tree_locked);
++		overmem_purge(rbtdb, rbtnode->locknum, rdataset_size(newheader),
++				  tree_locked);
+ 
+ 	NODE_LOCK(&rbtdb->node_locks[rbtnode->locknum].lock,
+ 		  isc_rwlocktype_write);
+@@ -6900,10 +6912,14 @@ addrdataset(dns_db_t *db, dns_dbnode_t *
+ 			cleanup_dead_nodes(rbtdb, rbtnode->locknum);
+ 
+ 		header = isc_heap_element(rbtdb->heaps[rbtnode->locknum], 1);
+-		if (header && header->rdh_ttl < now - RBTDB_VIRTUAL)
+-			expire_header(rbtdb, header, tree_locked,
+-				      expire_ttl);
++		if (header != NULL) {
++			dns_ttl_t rdh_ttl = header->rdh_ttl;
+ 
++			if (rdh_ttl < now - RBTDB_VIRTUAL) {
++				expire_header(rbtdb, header, tree_locked,
++					      expire_ttl);
++			}
++		}
+ 		/*
+ 		 * If we've been holding a write lock on the tree just for
+ 		 * cleaning, we can release it now.  However, we still need the
+@@ -10339,54 +10355,58 @@ update_header(dns_rbtdb_t *rbtdb, rdatas
+ 	ISC_LIST_PREPEND(rbtdb->rdatasets[header->node->locknum], header, link);
+ }
+ 
++static size_t
++expire_lru_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, size_t purgesize,
++		   isc_boolean_t tree_locked) {
++	rdatasetheader_t *header, *header_prev;
++	size_t purged = 0;
++
++	for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
++	     header != NULL && purged <= purgesize; header = header_prev)
++	{
++		header_prev = ISC_LIST_PREV(header, link);
++		/*
++		 * Unlink the entry at this point to avoid checking it
++		 * again even if it's currently used someone else and
++		 * cannot be purged at this moment.  This entry won't be
++		 * referenced any more (so unlinking is safe) since the
++		 * TTL was reset to 0.
++		 */
++		ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header, link);
++		size_t header_size = rdataset_size(header);
++		expire_header(rbtdb, header, tree_locked, expire_lru);
++		purged += header_size;
++	}
++
++	return (purged);
++}
++
+ /*%
+- * Purge some expired and/or stale (i.e. unused for some period) cache entries
+- * under an overmem condition.  To recover from this condition quickly, up to
+- * 2 entries will be purged.  This process is triggered while adding a new
+- * entry, and we specifically avoid purging entries in the same LRU bucket as
+- * the one to which the new entry will belong.  Otherwise, we might purge
+- * entries of the same name of different RR types while adding RRsets from a
+- * single response (consider the case where we're adding A and AAAA glue records
+- * of the same NS name).
++ * Purge some stale (i.e. unused for some period - LRU based cleaning) cache
++ * entries under the overmem condition.  To recover from this condition quickly,
++ * we cleanup entries up to the size of newly added rdata (passed as purgesize).
++ *
++ * This process is triggered while adding a new entry, and we specifically avoid
++ * purging entries in the same LRU bucket as the one to which the new entry will
++ * belong.  Otherwise, we might purge entries of the same name of different RR
++ * types while adding RRsets from a single response (consider the case where
++ * we're adding A and AAAA glue records of the same NS name).
+  */
+ static void
+-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
+-	      isc_stdtime_t now, isc_boolean_t tree_locked)
++overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize,
++		 isc_boolean_t tree_locked)
+ {
+-	rdatasetheader_t *header, *header_prev;
+ 	unsigned int locknum;
+-	int purgecount = 2;
++	size_t purged = 0;
+ 
+ 	for (locknum = (locknum_start + 1) % rbtdb->node_lock_count;
+-	     locknum != locknum_start && purgecount > 0;
++	     locknum != locknum_start && purged <= purgesize;
+ 	     locknum = (locknum + 1) % rbtdb->node_lock_count) {
+ 		NODE_LOCK(&rbtdb->node_locks[locknum].lock,
+ 			  isc_rwlocktype_write);
+ 
+-		header = isc_heap_element(rbtdb->heaps[locknum], 1);
+-		if (header && header->rdh_ttl < now - RBTDB_VIRTUAL) {
+-			expire_header(rbtdb, header, tree_locked,
+-				      expire_ttl);
+-			purgecount--;
+-		}
+-
+-		for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
+-		     header != NULL && purgecount > 0;
+-		     header = header_prev) {
+-			header_prev = ISC_LIST_PREV(header, link);
+-			/*
+-			 * Unlink the entry at this point to avoid checking it
+-			 * again even if it's currently used someone else and
+-			 * cannot be purged at this moment.  This entry won't be
+-			 * referenced any more (so unlinking is safe) since the
+-			 * TTL was reset to 0.
+-			 */
+-			ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header,
+-					link);
+-			expire_header(rbtdb, header, tree_locked,
+-				      expire_lru);
+-			purgecount--;
+-		}
++		purged += expire_lru_headers(rbtdb, locknum, purgesize - purged,
++					     tree_locked);
+ 
+ 		NODE_UNLOCK(&rbtdb->node_locks[locknum].lock,
+ 				    isc_rwlocktype_write);
diff --git a/SPECS/bind.spec b/SPECS/bind.spec
index 7105344..4602cfd 100644
--- a/SPECS/bind.spec
+++ b/SPECS/bind.spec
@@ -64,7 +64,7 @@ Summary:  The Berkeley Internet Name Domain (BIND) DNS (Domain Name System) serv
 Name:     bind
 License:  MPLv2.0
 Version:  9.11.4
-Release:  26%{?PATCHVER:.%{PATCHVER}}%{?PREVER:.%{PREVER}}%{?dist}.13
+Release:  26%{?PATCHVER:.%{PATCHVER}}%{?PREVER:.%{PREVER}}%{?dist}.14
 Epoch:    32
 Url:      http://www.isc.org/products/BIND/
 #
@@ -188,6 +188,7 @@ Patch201: bind-9.16-CVE-2022-38178.patch
 Patch202: bind-9.11-CVE-2022-2795.patch
 Patch203: bind-9.11-CVE-2021-25220-test.patch
 Patch204: bind-9.11-CVE-2021-25220.patch
+Patch205: bind-9.11-CVE-2023-2828.patch
 
 # SDB patches
 Patch11: bind-9.3.2b2-sdbsrc.patch
@@ -568,6 +569,7 @@ are used for building ISC DHCP.
 %patch202 -p1 -b .CVE-2022-2795
 %patch203 -p1 -b .CVE-2021-25220-test
 %patch204 -p1 -b .CVE-2021-25220
+%patch205 -p1 -b .CVE-2023-2828
 
 # Override upstream builtin keys
 cp -fp %{SOURCE29} bind.keys
@@ -1549,6 +1551,9 @@ rm -rf ${RPM_BUILD_ROOT}
 
 
 %changelog
+* Mon Jul 03 2023 Stepan Broz <sbroz@redhat.com> - 32:9.11.4-26.P2.14
+- Prevent the cache from exceeding the configured limit (CVE-2023-2828)
+
 * Wed Dec 14 2022 Petr Menšík <pemensik@redhat.com> - 32:9.11.4-26.P2.13
 - Tighten cache protection against record from forwarders (CVE-2021-25220)