diff --git a/SOURCES/0016-Ticket-50542-Entry-cache-contention-during-base-sear.patch b/SOURCES/0016-Ticket-50542-Entry-cache-contention-during-base-sear.patch
new file mode 100644
index 0000000..4aa0dce
--- /dev/null
+++ b/SOURCES/0016-Ticket-50542-Entry-cache-contention-during-base-sear.patch
@@ -0,0 +1,327 @@
+From 03453b7cb4d03691d47ccf5d82d92fe0572ec244 Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Thu, 8 Aug 2019 12:05:00 +0200
+Subject: [PATCH] Ticket 50542 - Entry cache contention during base search
+
+Bug Description:
+	During a base search the entry cache lock is acquired to retrieve the target entry.
+	Later, when the candidate list is built, the entry cache lock is acquired again
+	to retrieve the candidate, which is the target entry itself.
+
+	So for a base search the entry cache lock is accessed 4 times (2 acquires + 2 releases).
+
+	It is very easy to create heavy contention (e.g. when dereferencing a large group),
+	increasing the etime.
+
+Fix Description:
+	The idea is to acquire the entry from the entry cache (with refcnt++) when processing the base
+	search. Then, instead of returning the entry (refcnt--), the entry is kept in the operation until
+	the operation completes. If the entry is needed later (to send it back to the client), it is
+	picked up from the operation rather than from an entry cache lookup.
+
+https://pagure.io/389-ds-base/issue/50542
+
+Reviewed by: Ludwig Krispenz, William Brown
+
+Platforms tested: F29
+
+Flag Day: no
+
+Doc impact: no
+---
+ ldap/servers/slapd/back-ldbm/ldbm_search.c | 45 +++++++++++++++++++---
+ ldap/servers/slapd/operation.c             | 32 +++++++++++++++
+ ldap/servers/slapd/opshared.c              | 36 ++++++++++++++++-
+ ldap/servers/slapd/proto-slap.h            |  4 ++
+ ldap/servers/slapd/slap.h                  |  9 +++++
+ 5 files changed, 118 insertions(+), 8 deletions(-)
+
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
+index 8f3111813..c8f5719e1 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
+@@ -551,6 +551,13 @@ ldbm_back_search(Slapi_PBlock *pb)
+                                             LDBM_SRCH_DEFAULT_RESULT, NULL, 1, &vlv_request_control, NULL, candidates);
+         }
+     }
++    /* We have the base search entry and a callback to "cache_return" it.
++     * Keep it in the operation to avoid an additional cache fetch/return.
++     */
++    if (e && be->be_entry_release) {
++        operation_set_target_entry(operation, (void *) e);
++        operation_set_target_entry_id(operation, e->ep_id);
++    }
+ 
+     /*
+      * If this is a persistent search then the client is only
+@@ -807,7 +814,6 @@ ldbm_back_search(Slapi_PBlock *pb)
+         }
+     }
+ 
+-    CACHE_RETURN(&inst->inst_cache, &e);
+ 
+     /*
+      * if the candidate list is an allids list, arrange for access log
+@@ -1345,6 +1351,27 @@ ldbm_back_next_search_entry(Slapi_PBlock *pb)
+     return ldbm_back_next_search_entry_ext(pb, 0);
+ }
+ 
++/* The reference on the target_entry (base search) is stored in the operation.
++ * This prevents additional cache find/return calls that require the cache lock.
++ *
++ * The target entry is acquired during be->be_search (when building the candidate list)
++ * and is returned once the operation completes (or fails).
++ *
++ * The other entries sent back to the client are acquired/returned during send_results_ext.
++ * If the target entry is sent back to the client it is not returned (refcnt--) during send_results_ext.
++ *
++ * This function returns (refcnt-- in the entry cache) the entry unless it is
++ * the target_entry (base search); the target_entry will be returned once the operation
++ * completes.
++ */
++static void
++non_target_cache_return(Slapi_Operation *op, struct cache *cache, struct backentry **e)
++{
++    if (e && (*e != operation_get_target_entry(op))) {
++        CACHE_RETURN(cache, e);
++    }
++}
++
+ int
+ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
+ {
+@@ -1447,7 +1474,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
+     /* If we are using the extension, the front end will tell
+      * us when to do this so we don't do it now */
+     if (sr->sr_entry && !use_extension) {
+-        CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
++        non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
+         sr->sr_entry = NULL;
+     }
+ 
+@@ -1559,7 +1586,13 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
+         }
+ 
+         /* get the entry */
+-        e = id2entry(be, id, &txn, &err);
++        e = operation_get_target_entry(op);
++        if ((e == NULL) || (id != operation_get_target_entry_id(op))) {
++            /* if the entry is not the target_entry (base search)
++             * we need to fetch it from the entry cache (it was not
++             * referenced in the operation) */
++            e = id2entry(be, id, &txn, &err);
++        }
+         if (e == NULL) {
+             if (err != 0 && err != DB_NOTFOUND) {
+                 slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_next_search_entry_ext", "next_search_entry db err %d\n",
+@@ -1679,7 +1712,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
+                     /* check size limit */
+                     if (slimit >= 0) {
+                         if (--slimit < 0) {
+-                            CACHE_RETURN(&inst->inst_cache, &e);
++                            non_target_cache_return(op, &inst->inst_cache, &e);
+                             slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET_SIZE_ESTIMATE, &estimate);
+                             delete_search_result_set(pb, &sr);
+                             slapi_send_ldap_result(pb, LDAP_SIZELIMIT_EXCEEDED, NULL, NULL, nentries, urls);
+@@ -1717,12 +1750,12 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
+                     rc = 0;
+                     goto bail;
+                 } else {
+-                    CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
++                    non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
+                     sr->sr_entry = NULL;
+                 }
+             } else {
+                 /* Failed the filter test, and this isn't a VLV Search */
+-                CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
++                non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
+                 sr->sr_entry = NULL;
+                 if (LDAP_UNWILLING_TO_PERFORM == filter_test) {
+                     /* Need to catch this error to detect the vattr loop */
+diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
+index 4a05e0a49..8186fd33b 100644
+--- a/ldap/servers/slapd/operation.c
++++ b/ldap/servers/slapd/operation.c
+@@ -354,6 +354,38 @@ operation_is_flag_set(Slapi_Operation *op, int flag)
+     return op->o_flags & flag;
+ }
+ 
++void *
++operation_get_target_entry(Slapi_Operation *op)
++{
++    PR_ASSERT(op);
++
++    return op->o_target_entry;
++}
++
++void
++operation_set_target_entry(Slapi_Operation *op, void *target_entry)
++{
++    PR_ASSERT(op);
++
++    op->o_target_entry = target_entry;
++}
++
++u_int32_t
++operation_get_target_entry_id(Slapi_Operation *op)
++{
++    PR_ASSERT(op);
++
++    return op->o_target_entry_id;
++}
++
++void
++operation_set_target_entry_id(Slapi_Operation *op, u_int32_t target_entry_id)
++{
++    PR_ASSERT(op);
++
++    op->o_target_entry_id = target_entry_id;
++}
++
+ Slapi_DN *
+ operation_get_target_spec(Slapi_Operation *op)
+ {
+diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
+index cf6cdff01..b9fc83516 100644
+--- a/ldap/servers/slapd/opshared.c
++++ b/ldap/servers/slapd/opshared.c
+@@ -193,6 +193,28 @@ slapi_attr_is_last_mod(char *attr)
+     return 0;
+ }
+ 
++/* The reference on the target_entry (base search) is stored in the operation.
++ * This prevents additional cache find/return calls that require the cache lock.
++ *
++ * The target entry is acquired during be->be_search (when building the candidate list)
++ * and is returned once the operation completes (or fails).
++ *
++ * The other entries sent back to the client are acquired/returned during send_results_ext.
++ * If the target entry is sent back to the client it is not returned (refcnt--) during send_results_ext.
++ *
++ * This function only returns (refcnt-- in the entry cache) the target_entry (base search).
++ * It is called at the operation level (op_shared_search).
++ *
++ */
++static void
++cache_return_target_entry(Slapi_PBlock *pb, Slapi_Backend *be, Slapi_Operation *operation)
++{
++    if (operation_get_target_entry(operation) && be->be_entry_release) {
++        (*be->be_entry_release)(pb, operation_get_target_entry(operation));
++        operation_set_target_entry(operation, NULL);
++        operation_set_target_entry_id(operation, 0);
++    }
++}
+ /*
+  * Returns: 0    - if the operation is successful
+  *        < 0    - if operation fails.
+@@ -252,6 +274,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+     /* get search parameters */
+     slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &base);
+     slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
++    slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+ 
+     if (NULL == sdn) {
+         sdn = slapi_sdn_new_dn_byval(base);
+@@ -276,7 +299,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+     slapi_pblock_get(pb, SLAPI_SEARCH_SCOPE, &scope);
+     slapi_pblock_get(pb, SLAPI_SEARCH_STRFILTER, &fstr);
+     slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
+-    slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
++
+     if (operation == NULL) {
+         op_shared_log_error_access(pb, "SRCH", base, "NULL operation");
+         send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "NULL operation", 0, NULL);
+@@ -808,6 +831,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+              * the error has already been sent
+              * stop the search here
+              */
++                    cache_return_target_entry(pb, be, operation);
+                     goto free_and_return;
+                 }
+ 
+@@ -815,6 +839,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+ 
+             case SLAPI_FAIL_DISKFULL:
+                 operation_out_of_disk_space();
++                cache_return_target_entry(pb, be, operation);
+                 goto free_and_return;
+ 
+             case 0: /* search was successful and we need to send the result */
+@@ -840,6 +865,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+                             /* no more entries && no more backends */
+                             curr_search_count = -1;
+                         } else if (rc < 0) {
++                            cache_return_target_entry(pb, be, operation);
+                             goto free_and_return;
+                         }
+                     } else {
+@@ -852,6 +878,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+                             (pagedresults_set_search_result_set_size_estimate(pb_conn, operation, estimate, pr_idx) < 0) ||
+                             (pagedresults_set_with_sort(pb_conn, operation, with_sort, pr_idx) < 0)) {
+                             pagedresults_unlock(pb_conn, pr_idx);
++                            cache_return_target_entry(pb, be, operation);
+                             goto free_and_return;
+                         }
+                         pagedresults_unlock(pb_conn, pr_idx);
+@@ -867,6 +894,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+                         pagedresults_set_response_control(pb, 0, estimate, -1, pr_idx);
+                         send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
+                         rc = LDAP_SUCCESS;
++                        cache_return_target_entry(pb, be, operation);
+                         goto free_and_return;
+                     }
+                     pagedresults_set_response_control(pb, 0, estimate, curr_search_count, pr_idx);
+@@ -880,10 +908,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+          * LDAP error should already have been sent to the client
+          * stop the search, free and return
+          */
+-                if (rc != 0)
++                if (rc != 0) {
++                    cache_return_target_entry(pb, be, operation);
+                     goto free_and_return;
++                }
+                 break;
+             }
++            /* cache return the target_entry */
++            cache_return_target_entry(pb, be, operation);
+         }
+ 
+         nentries += pnentries;
+diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
+index e37f702ea..d9fb8fd08 100644
+--- a/ldap/servers/slapd/proto-slap.h
++++ b/ldap/servers/slapd/proto-slap.h
+@@ -873,6 +873,10 @@ void operation_free(Slapi_Operation **op, Connection *conn);
+ int slapi_op_abandoned(Slapi_PBlock *pb);
+ void operation_out_of_disk_space(void);
+ int get_operation_object_type(void);
++void *operation_get_target_entry(Slapi_Operation *op);
++void operation_set_target_entry(Slapi_Operation *op, void *target_void);
++u_int32_t operation_get_target_entry_id(Slapi_Operation *op);
++void operation_set_target_entry_id(Slapi_Operation *op, u_int32_t target_entry_id);
+ Slapi_DN *operation_get_target_spec(Slapi_Operation *op);
+ void operation_set_target_spec(Slapi_Operation *op, const Slapi_DN *target_spec);
+ void operation_set_target_spec_str(Slapi_Operation *op, const char *target_spec);
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index bce720974..a8908d94c 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -1545,6 +1545,15 @@ typedef struct op
+     unsigned long o_flags;                                     /* flags for this operation      */
+     void *o_extension;                                         /* plugins are able to extend the Operation object */
+     Slapi_DN *o_target_spec;                                   /* used to decide which plugins should be called for the operation */
++    void *o_target_entry;                                      /* Only used for SEARCH operations:
++                                                                * reference of the search target entry (base search) in the entry cache.
++                                                                * When it is set, the refcnt (of the entry in the entry cache) has been increased
++                                                                */
++    u_int32_t o_target_entry_id;                               /* Only used for SEARCH operations:
++                                                                * contains the ID of o_target_entry. In send_result we have the IDs of the candidates; this
++                                                                * accelerates the tests as we do not have to retrieve the ep_id inside
++                                                                * o_target_entry for each candidate.
++                                                                */
+     unsigned long o_abandoned_op;                              /* operation abandoned by this operation - used to decide which plugins to invoke */
+     struct slapi_operation_parameters o_params;
+     struct slapi_operation_results o_results;
+-- 
+2.24.1
+
diff --git a/SOURCES/0017-Issue-50834-Incorrectly-setting-the-NSS-default-SSL-.patch b/SOURCES/0017-Issue-50834-Incorrectly-setting-the-NSS-default-SSL-.patch
new file mode 100644
index 0000000..aa439a6
--- /dev/null
+++ b/SOURCES/0017-Issue-50834-Incorrectly-setting-the-NSS-default-SSL-.patch
@@ -0,0 +1,35 @@
+From a1c4b869645eca6bf81e1b7bc116bbb0de389197 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Mon, 20 Jan 2020 13:16:36 -0500
+Subject: [PATCH] Issue 50834 - Incorrectly setting the NSS default SSL version
+ max
+
+Description:  We've been using the wrong function to get the NSS max
+              version. We were calling SSL_VersionRangeGetSupported(),
+              which gets the versions NSS "can" handle, but
+              SSL_VersionRangeGetDefault() gets the versions that
+              are actually "enabled".
+
+relates: https://pagure.io/389-ds-base/issue/50834
+
+Reviewed by: mreynolds (one line commit rule)
+---
+ ldap/servers/slapd/ssl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
+index ed054db44..c71e3019b 100644
+--- a/ldap/servers/slapd/ssl.c
++++ b/ldap/servers/slapd/ssl.c
+@@ -1164,7 +1164,7 @@ slapd_nss_init(int init_ssl __attribute__((unused)), int config_available __attr
+     char *certdir;
+     char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH];
+     /* Get the range of the supported SSL version */
+-    SSL_VersionRangeGetSupported(ssl_variant_stream, &enabledNSSVersions);
++    SSL_VersionRangeGetDefault(ssl_variant_stream, &enabledNSSVersions);
+ 
+     (void)slapi_getSSLVersion_str(enabledNSSVersions.min, emin, sizeof(emin));
+     (void)slapi_getSSLVersion_str(enabledNSSVersions.max, emax, sizeof(emax));
+-- 
+2.24.1
+
diff --git a/SOURCES/0018-Ticket-50736-RetroCL-trimming-may-crash-at-shutdown-.patch b/SOURCES/0018-Ticket-50736-RetroCL-trimming-may-crash-at-shutdown-.patch
new file mode 100644
index 0000000..38ec626
--- /dev/null
+++ b/SOURCES/0018-Ticket-50736-RetroCL-trimming-may-crash-at-shutdown-.patch
@@ -0,0 +1,263 @@
+From 5497023c6857ae0fcf046e57744a91107d362c41 Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Mon, 25 Nov 2019 10:59:44 +0100
+Subject: [PATCH] Ticket 50736 - RetroCL trimming may crash at shutdown if
+ trimming configuration is invalid
+
+Bug Description:
+        If the retroCL trimming config contains an invalid value for trim-interval
+        and/or maxage, then the trimming initialization is skipped.
+        In that case the trimming structures are not allocated, and if they
+        are freed at shutdown it triggers a crash.
+
+Fix Description:
+        When the trimming mechanism is stopped (at shutdown), check that
+        it was successfully initialized before freeing the structs.
+
+https://pagure.io/389-ds-base/issue/50736
+
+Reviewed by: Mark Reynolds
+
+Platforms tested: F30
+
+Flag Day: no
+
+Doc impact: no
+---
+ .../suites/replication/changelog_test.py      | 185 ++++++++++++++++++
+ ldap/servers/plugins/retrocl/retrocl_trim.c   |  17 +-
+ 2 files changed, 196 insertions(+), 6 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/replication/changelog_test.py b/dirsrvtests/tests/suites/replication/changelog_test.py
+index 0b6b886f3..0d3e85bb2 100755
+--- a/dirsrvtests/tests/suites/replication/changelog_test.py
++++ b/dirsrvtests/tests/suites/replication/changelog_test.py
+@@ -16,6 +16,12 @@ from lib389.replica import Replicas
+ from lib389.idm.user import UserAccounts
+ from lib389.topologies import topology_m2 as topo
+ from lib389._constants import *
++from lib389.plugins import RetroChangelogPlugin
++from lib389.dseldif import DSEldif
++from lib389.tasks import *
++from lib389.utils import *
++
++pytestmark = pytest.mark.tier1
+ 
+ TEST_ENTRY_NAME = 'replusr'
+ NEW_RDN_NAME = 'cl5usr'
+@@ -235,6 +241,185 @@ def test_verify_changelog_offline_backup(topo):
+     _check_changelog_ldif(topo, changelog_ldif)
+ 
+ 
++@pytest.mark.ds47669
++def test_changelog_maxage(topo, changelog_init):
++    """Check nsslapd-changelog max age values
++
++    :id: d284ff27-03b2-412c-ac74-ac4f2d2fae3b
++    :setup: Replication with two masters, change nsslapd-changelogdir to
++    '/var/lib/dirsrv/slapd-master1/changelog' and
++    set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on'
++    :steps:
++        1. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '12345','10s','30M','12h','2D','4w'
++        2. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '-123','xyz'
++
++    :expectedresults:
++        1. Operation should be successful
++        2. Operation should be unsuccessful
++     """
++    log.info('1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config')
++
++    # bind as directory manager
++    topo.ms["master1"].log.info("Bind as %s" % DN_DM)
++    topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
++
++    add_and_check(topo, CHANGELOG, MAXAGE, '12345', True)
++    add_and_check(topo, CHANGELOG, MAXAGE, '10s', True)
++    add_and_check(topo, CHANGELOG, MAXAGE, '30M', True)
++    add_and_check(topo, CHANGELOG, MAXAGE, '12h', True)
++    add_and_check(topo, CHANGELOG, MAXAGE, '2D', True)
++    add_and_check(topo, CHANGELOG, MAXAGE, '4w', True)
++    add_and_check(topo, CHANGELOG, MAXAGE, '-123', False)
++    add_and_check(topo, CHANGELOG, MAXAGE, 'xyz', False)
++
++
++@pytest.mark.ds47669
++def test_ticket47669_changelog_triminterval(topo, changelog_init):
++    """Check nsslapd-changelog triminterval values
++
++    :id: 8f850c37-7e7c-49dd-a4e0-9344638616d6
++    :setup: Replication with two masters, change nsslapd-changelogdir to
++    '/var/lib/dirsrv/slapd-master1/changelog' and
++    set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on'
++    :steps:
++        1. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values -
++           '12345','10s','30M','12h','2D','4w'
++        2. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values - '-123','xyz'
++
++    :expectedresults:
++        1. Operation should be successful
++        2. Operation should be unsuccessful
++     """
++    log.info('2. Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config')
++
++    # bind as directory manager
++    topo.ms["master1"].log.info("Bind as %s" % DN_DM)
++    topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
++
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12345', True)
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, '10s', True)
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, '30M', True)
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12h', True)
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, '2D', True)
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, '4w', True)
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, '-123', False)
++    add_and_check(topo, CHANGELOG, TRIMINTERVAL, 'xyz', False)
++
++
++@pytest.mark.ds47669
++def test_changelog_compactdbinterval(topo, changelog_init):
++    """Check nsslapd-changelog compactdbinterval values
++
++    :id: 0f4b3118-9dfa-4c2a-945c-72847b42a48c
++    :setup: Replication with two masters, change nsslapd-changelogdir to
++    '/var/lib/dirsrv/slapd-master1/changelog' and
++    set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on'
++    :steps:
++        1. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values -
++           '12345','10s','30M','12h','2D','4w'
++        2. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values -
++           '-123','xyz'
++
++    :expectedresults:
++        1. Operation should be successful
++        2. Operation should be unsuccessful
++     """
++    log.info('3. Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config')
++
++    # bind as directory manager
++    topo.ms["master1"].log.info("Bind as %s" % DN_DM)
++    topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
++
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12345', True)
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '10s', True)
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '30M', True)
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12h', True)
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '2D', True)
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '4w', True)
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '-123', False)
++    add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False)
++
++
++@pytest.mark.ds47669
++def test_retrochangelog_maxage(topo, changelog_init):
++    """Check nsslapd-retrochangelog max age values
++
++    :id: 0cb84d81-3e86-4dbf-84a2-66aefd8281db
++    :setup: Replication with two masters, change nsslapd-changelogdir to
++    '/var/lib/dirsrv/slapd-master1/changelog' and
++    set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on'
++    :steps:
++        1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values -
++           '12345','10s','30M','12h','2D','4w'
++        2. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values -
++           '-123','xyz'
++
++    :expectedresults:
++        1. Operation should be successful
++        2. Operation should be unsuccessful
++     """
++    log.info('4. Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config')
++
++    # bind as directory manager
++    topo.ms["master1"].log.info("Bind as %s" % DN_DM)
++    topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
++
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, '12345', True)
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, '10s', True)
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, '30M', True)
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, '12h', True)
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, '2D', True)
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, '4w', True)
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, '-123', False)
++    add_and_check(topo, RETROCHANGELOG, MAXAGE, 'xyz', False)
++
++    topo.ms["master1"].log.info("ticket47669 was successfully verified.")
++
++@pytest.mark.ds50736
++def test_retrochangelog_trimming_crash(topo, changelog_init):
++    """Check that when retroCL nsslapd-retrocthangelog contains invalid
++    value, then the instance does not crash at shutdown
++
++    :id: 5d9bd7ca-e9bf-4be9-8fc8-902aa5513052
++    :setup: Replication with two masters, change nsslapd-changelogdir to
++    '/var/lib/dirsrv/slapd-master1/changelog' and
++    set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on'
++    :steps:
++        1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to value '-1'
++           This value is invalid. To disable retroCL trimming it should be set to 0
++        2. Do several restarts
++        3. Check there is no 'Detected Disorderly Shutdown' message (crash)
++        4. Restore a valid value for nsslapd-changelogmaxage ('1w')
++
++    :expectedresults:
++        1. Operation should be successful
++        2. Operation should be successful
++        3. Operation should be successful
++        4. Operation should be successful
++     """
++    log.info('1. Test retroCL trimming crash in cn=Retro Changelog Plugin,cn=plugins,cn=config')
++
++    # set nsslapd-changelogmaxage directly in dse.ldif
++    # because the value being set is invalid
++    topo.ms["master1"].log.info("ticket50736 start verification")
++    topo.ms["master1"].stop()
++    retroPlugin = RetroChangelogPlugin(topo.ms["master1"])
++    dse_ldif = DSEldif(topo.ms["master1"])
++    dse_ldif.replace(retroPlugin.dn, 'nsslapd-changelogmaxage', '-1')
++    topo.ms["master1"].start()
++
++    # The crash should be systematic, but just in case do several restarts
++    # with a delay to let all plugins initialize
++    for i in range(5):
++        time.sleep(1)
++        topo.ms["master1"].stop()
++        topo.ms["master1"].start()
++
++    assert not topo.ms["master1"].detectDisorderlyShutdown()
++
++    topo.ms["master1"].log.info("ticket 50736 was successfully verified.")
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c
+index a46534984..0378eb7f6 100644
+--- a/ldap/servers/plugins/retrocl/retrocl_trim.c
++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c
+@@ -481,11 +481,16 @@ retrocl_init_trimming(void)
+ void
+ retrocl_stop_trimming(void)
+ {
+-    retrocl_trimming = 0;
+-    if (retrocl_trim_ctx) {
+-        slapi_eq_cancel(retrocl_trim_ctx);
+-        retrocl_trim_ctx = NULL;
++    if (retrocl_trimming) {
++        /* The retroCL trimming config was valid and the trimming structs were allocated.
++         * Let's free them
++         */
++        retrocl_trimming = 0;
++        if (retrocl_trim_ctx) {
++            slapi_eq_cancel(retrocl_trim_ctx);
++            retrocl_trim_ctx = NULL;
++        }
++        PR_DestroyLock(ts.ts_s_trim_mutex);
++        ts.ts_s_trim_mutex = NULL;
+     }
+-    PR_DestroyLock(ts.ts_s_trim_mutex);
+-    ts.ts_s_trim_mutex = NULL;
+ }
+-- 
+2.24.1
+
diff --git a/SOURCES/0019-Ticket-50709-Several-memory-leaks-reported-by-Valgri.patch b/SOURCES/0019-Ticket-50709-Several-memory-leaks-reported-by-Valgri.patch
new file mode 100644
index 0000000..dedd8a9
--- /dev/null
+++ b/SOURCES/0019-Ticket-50709-Several-memory-leaks-reported-by-Valgri.patch
@@ -0,0 +1,189 @@
+From ceef0b6ae9edbb60bc6324c3dc045f3a4e5fd725 Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Fri, 8 Nov 2019 18:16:06 +0100
+Subject: [PATCH] Ticket 50709: Several memory leaks reported by Valgrind for
+ 389-ds 1.3.9.1-10
+
+Description of the problem:
+
+	When evaluating an ACI with an 'ip' subject, a PRNetAddr is added to the subject
+	property list. When the list is freed (acl__done_aclpb) the property is not freed.
+
+Description of the fix:
+
+	Add the property to the pblock (SLAPI_CONN_CLIENTNETADDR_ACLIP) so that
+	the property is freed with the acl pblock.
+
+https://pagure.io/389-ds-base/issue/50709
+
+Reviewed by: Mark Reynolds, William Brown, Ludwig Krispenz
+---
+ ldap/servers/plugins/acl/acllas.c | 54 ++++++++++++++++++++-----------
+ ldap/servers/slapd/connection.c   |  2 ++
+ ldap/servers/slapd/pblock.c       | 16 +++++++++
+ ldap/servers/slapd/slap.h         |  1 +
+ ldap/servers/slapd/slapi-plugin.h |  1 +
+ 5 files changed, 56 insertions(+), 18 deletions(-)
+
+diff --git a/ldap/servers/plugins/acl/acllas.c b/ldap/servers/plugins/acl/acllas.c
+index 3950fd405..a5602e198 100644
+--- a/ldap/servers/plugins/acl/acllas.c
++++ b/ldap/servers/plugins/acl/acllas.c
+@@ -251,6 +251,7 @@ DS_LASIpGetter(NSErr_t *errp, PList_t subject, PList_t resource, PList_t auth_in
+ {
+     struct acl_pblock *aclpb = NULL;
+     PRNetAddr *client_praddr = NULL;
++    PRNetAddr *pb_client_praddr = NULL;
+     char ip_str[256];
+     int rv = LAS_EVAL_TRUE;
+ 
+@@ -262,25 +263,39 @@ DS_LASIpGetter(NSErr_t *errp, PList_t subject, PList_t resource, PList_t auth_in
+         return LAS_EVAL_FAIL;
+     }
+ 
+-    client_praddr = (PRNetAddr *)slapi_ch_malloc(sizeof(PRNetAddr));
+-    if (client_praddr == NULL) {
+-        slapi_log_err(SLAPI_LOG_ERR, plugin_name, "DS_LASIpGetter - Failed to allocate client_praddr\n");
+-        return (LAS_EVAL_FAIL);
+-    }
++    slapi_pblock_get(aclpb->aclpb_pblock, SLAPI_CONN_CLIENTNETADDR_ACLIP, &pb_client_praddr);
++    if (pb_client_praddr == NULL) {
+ 
+-    if (slapi_pblock_get(aclpb->aclpb_pblock, SLAPI_CONN_CLIENTNETADDR, client_praddr) != 0) {
+-        slapi_log_err(SLAPI_LOG_ERR, plugin_name, "DS_LASIpGetter - Could not get client IP.\n");
+-        slapi_ch_free((void **)&client_praddr);
+-        return (LAS_EVAL_FAIL);
+-    }
++        client_praddr = (PRNetAddr *) slapi_ch_malloc(sizeof (PRNetAddr));
++        if (client_praddr == NULL) {
++            slapi_log_err(SLAPI_LOG_ERR, plugin_name, "DS_LASIpGetter - Failed to allocate client_praddr\n");
++            return (LAS_EVAL_FAIL);
++        }
+ 
+-    rv = PListInitProp(subject, 0, ACL_ATTR_IP, (void *)client_praddr, NULL);
+-    if (rv < 0) {
+-        slapi_log_err(SLAPI_LOG_ACL, plugin_name, "DS_LASIpGetter - "
+-                                                  "Couldn't set the client addr property(%d)\n",
+-                      rv);
+-        slapi_ch_free((void **)&client_praddr);
+-        return LAS_EVAL_FAIL;
++        if (slapi_pblock_get(aclpb->aclpb_pblock, SLAPI_CONN_CLIENTNETADDR, client_praddr) != 0) {
++            slapi_log_err(SLAPI_LOG_ERR, plugin_name, "DS_LASIpGetter - Could not get client IP.\n");
++            slapi_ch_free((void **) &client_praddr);
++            return (LAS_EVAL_FAIL);
++        }
++
++        rv = PListInitProp(subject, 0, ACL_ATTR_IP, (void *) client_praddr, NULL);
++        if (rv < 0) {
++            slapi_log_err(SLAPI_LOG_ACL, plugin_name, "DS_LASIpGetter - "
++                    "Couldn't set the client addr property(%d)\n",
++                    rv);
++            slapi_ch_free((void **) &client_praddr);
++            return LAS_EVAL_FAIL;
++        }
++
++    } else {
++        client_praddr = pb_client_praddr;
++        rv = PListInitProp(subject, 0, ACL_ATTR_IP, (void *) client_praddr, NULL);
++        if (rv < 0) {
++            slapi_log_err(SLAPI_LOG_ACL, plugin_name, "DS_LASIpGetter - "
++                    "Couldn't set the client addr property(%d)\n",
++                    rv);
++            return LAS_EVAL_FAIL;
++        }
+     }
+     if (PR_NetAddrToString(client_praddr, ip_str, sizeof(ip_str)) == PR_SUCCESS) {
+         slapi_log_err(SLAPI_LOG_ACL, plugin_name, "DS_LASIpGetter - "
+@@ -290,7 +305,10 @@ DS_LASIpGetter(NSErr_t *errp, PList_t subject, PList_t resource, PList_t auth_in
+         slapi_log_err(SLAPI_LOG_ACL, plugin_name, "DS_LASIpGetter - "
+                                                   "Returning client ip address 'unknown'\n");
+     }
+-
++    if (client_praddr != pb_client_praddr) {
++        /* Set it in pblock only if it is newly allocated */
++        slapi_pblock_set(aclpb->aclpb_pblock, SLAPI_CONN_CLIENTNETADDR_ACLIP, client_praddr);
++    }
+     return LAS_EVAL_TRUE;
+ }
+ 
+diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
+index 9abd546f9..b9b280e6d 100644
+--- a/ldap/servers/slapd/connection.c
++++ b/ldap/servers/slapd/connection.c
+@@ -205,6 +205,7 @@ connection_cleanup(Connection *conn)
+     conn->c_isreplication_session = 0;
+     slapi_ch_free((void **)&conn->cin_addr);
+     slapi_ch_free((void **)&conn->cin_destaddr);
++    slapi_ch_free((void **)&conn->cin_addr_aclip);
+     slapi_ch_free_string(&conn->c_ipaddr);
+     if (conn->c_domain != NULL) {
+         ber_bvecfree(conn->c_domain);
+@@ -397,6 +398,7 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib
+             str_destip = str_unknown;
+         }
+     }
++    slapi_ch_free((void **)&conn->cin_addr_aclip);
+ 
+ 
+     if (!in_referral_mode) {
+diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
+index bc18a7b18..d2ad6147a 100644
+--- a/ldap/servers/slapd/pblock.c
++++ b/ldap/servers/slapd/pblock.c
+@@ -482,6 +482,14 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
+         }
+         PR_ExitMonitor(pblock->pb_conn->c_mutex);
+         break;
++	case SLAPI_CONN_CLIENTNETADDR_ACLIP:
++        if (pblock->pb_conn == NULL) {
++            break;
++        }
++        pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
++        (*(PRNetAddr **) value) = pblock->pb_conn->cin_addr_aclip;
++        pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
++        break;
+     case SLAPI_CONN_SERVERNETADDR:
+         if (pblock->pb_conn == NULL) {
+             memset(value, 0, sizeof(PRNetAddr));
+@@ -2571,6 +2579,14 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
+         pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value);
+         PR_ExitMonitor(pblock->pb_conn->c_mutex);
+         break;
++	case SLAPI_CONN_CLIENTNETADDR_ACLIP:
++        if (pblock->pb_conn == NULL) {
++            break;
++        }
++        pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
++        slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip);
++        pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value;
++        pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
+     case SLAPI_CONN_IS_REPLICATION_SESSION:
+         if (pblock->pb_conn == NULL) {
+             slapi_log_err(SLAPI_LOG_ERR,
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index a8908d94c..4c53d43dc 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -1617,6 +1617,7 @@ typedef struct conn
+     char *c_external_dn;             /* client DN of this SSL session  */
+     char *c_external_authtype;       /* used for c_external_dn   */
+     PRNetAddr *cin_addr;             /* address of client on this conn */
++    PRNetAddr *cin_addr_aclip;       /* address of client allocated by acl with 'ip' subject */
+     PRNetAddr *cin_destaddr;         /* address client connected to    */
+     struct berval **c_domain;        /* DNS names of client            */
+     Operation *c_ops;                /* list of pending operations      */
+diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
+index 0bc3a6fab..679bdbb5c 100644
+--- a/ldap/servers/slapd/slapi-plugin.h
++++ b/ldap/servers/slapd/slapi-plugin.h
+@@ -6971,6 +6971,7 @@ slapi_timer_result slapi_timespec_expire_check(struct timespec *expire);
+ #define SLAPI_CONN_DN                     143
+ #define SLAPI_CONN_CLIENTNETADDR          850
+ #define SLAPI_CONN_SERVERNETADDR          851
++#define SLAPI_CONN_CLIENTNETADDR_ACLIP    853
+ #define SLAPI_CONN_IS_REPLICATION_SESSION 149
+ #define SLAPI_CONN_IS_SSL_SESSION         747
+ #define SLAPI_CONN_CERT                   743
+-- 
+2.24.1
+
diff --git a/SOURCES/0020-Ticket-50857-Memory-leak-in-ACI-using-IP-subject.patch b/SOURCES/0020-Ticket-50857-Memory-leak-in-ACI-using-IP-subject.patch
new file mode 100644
index 0000000..c6c8c9d
--- /dev/null
+++ b/SOURCES/0020-Ticket-50857-Memory-leak-in-ACI-using-IP-subject.patch
@@ -0,0 +1,43 @@
+From db358c127ff26c280bef5d85e62e3a08153438d5 Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Mon, 27 Jan 2020 13:49:37 +0100
+Subject: [PATCH] Ticket 50857 - Memory leak in ACI using IP subject
+
+Bug Description:
+	When an ACI is evaluated (LASIpEval) a context (cookie) is allocated.
+	At the end of the connection, the context is freed
+	via a callback (LASIpFlush).
+	The context contains two LASIpTree_t trees (ipv4 and ipv6).
+	In the free callback, only the ipv4 tree is freed.
+
+Fix Description:
+	Free the ipv6 tree in LASIpFlush as well.
+
+https://pagure.io/389-ds-base/issue/50857
+
+Reviewed by: Mark Reynolds
+
+Platforms tested: F31
+
+Flag Day: no
+
+Doc impact: no
+---
+ lib/libaccess/lasip.cpp | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/libaccess/lasip.cpp b/lib/libaccess/lasip.cpp
+index 30c546df7..cdb88eec5 100644
+--- a/lib/libaccess/lasip.cpp
++++ b/lib/libaccess/lasip.cpp
+@@ -436,6 +436,7 @@ LASIpFlush(void **las_cookie)
+         return;
+ 
+     LASIpTreeDealloc(((LASIpContext_t *)*las_cookie)->treetop);
++    LASIpTreeDealloc(((LASIpContext_t *)*las_cookie)->treetop_ipv6);
+     PERM_FREE(*las_cookie);
+     *las_cookie = NULL;
+     return;
+-- 
+2.24.1
+
diff --git a/SOURCES/0021-Ticket-50709-cont-Several-memory-leaks-reported-by-V.patch b/SOURCES/0021-Ticket-50709-cont-Several-memory-leaks-reported-by-V.patch
new file mode 100644
index 0000000..2cee9d2
--- /dev/null
+++ b/SOURCES/0021-Ticket-50709-cont-Several-memory-leaks-reported-by-V.patch
@@ -0,0 +1,58 @@
+From cbe93b4bd4569181db85aeac501798985d7d1acd Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Fri, 24 Jan 2020 10:56:23 +0100
+Subject: [PATCH] Ticket 50709: (cont) Several memory leaks reported by
+ Valgrind for 389-ds 1.3.9.1-10
+
+Bug Description:
+	The cherry-pick from master was broken because connection locking
+	uses a pthread lock in master while it remains a Monitor in 1.3.10.
+
+Fix Description:
+	Change the locking calls to use the Monitor in this branch.
+
+https://pagure.io/389-ds-base/issue/50709
+
+Reviewed by: thierry bordaz
+
+Platforms tested: 7.8
+
+Flag Day: no
+
+Doc impact: no
+---
+ ldap/servers/slapd/pblock.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
+index d2ad6147a..d21cf7e76 100644
+--- a/ldap/servers/slapd/pblock.c
++++ b/ldap/servers/slapd/pblock.c
+@@ -486,9 +486,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
+         if (pblock->pb_conn == NULL) {
+             break;
+         }
+-        pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
++        PR_EnterMonitor(pblock->pb_conn->c_mutex);
+         (*(PRNetAddr **) value) = pblock->pb_conn->cin_addr_aclip;
+-        pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
++        PR_ExitMonitor(pblock->pb_conn->c_mutex);
+         break;
+     case SLAPI_CONN_SERVERNETADDR:
+         if (pblock->pb_conn == NULL) {
+@@ -2583,10 +2583,10 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
+         if (pblock->pb_conn == NULL) {
+             break;
+         }
+-        pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
++        PR_EnterMonitor(pblock->pb_conn->c_mutex);
+         slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip);
+         pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value;
+-        pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
++        PR_ExitMonitor(pblock->pb_conn->c_mutex);
+     case SLAPI_CONN_IS_REPLICATION_SESSION:
+         if (pblock->pb_conn == NULL) {
+             slapi_log_err(SLAPI_LOG_ERR,
+-- 
+2.24.1
+
diff --git a/SOURCES/0022-fix-for-50542-crashes-in-filter-tests.patch b/SOURCES/0022-fix-for-50542-crashes-in-filter-tests.patch
new file mode 100644
index 0000000..e0f1fac
--- /dev/null
+++ b/SOURCES/0022-fix-for-50542-crashes-in-filter-tests.patch
@@ -0,0 +1,42 @@
+From 391130c60ccedb0f7650d4454141686d293dc39e Mon Sep 17 00:00:00 2001
+From: Ludwig Krispenz <lkrispen@redhat.com>
+Date: Tue, 20 Aug 2019 10:18:22 +0200
+Subject: [PATCH] fix for 50542 crashes in filter tests
+
+The crash occurs when a backentry is released: there is a call to CACHE_RETURN
+followed by a check and free of a vlv entry.
+But CACHE_RETURN, under some conditions, can free the backentry - the following check will then
+dereference a NULL entry and crash.
+
+Fix: Reverse the order of freeing the vlv entry and returning the entry to the cache.
+
+Note: Viktor successfully ran the tests, thanks
+
+Reviewed by: ?
+---
+ ldap/servers/slapd/back-ldbm/ldbm_search.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
+index c8f5719e1..2ad8f743a 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
+@@ -1896,13 +1896,13 @@ ldbm_back_entry_release(Slapi_PBlock *pb, void *backend_info_ptr)
+     slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+     inst = (ldbm_instance *)be->be_instance_info;
+ 
+-    CACHE_RETURN(&inst->inst_cache, (struct backentry **)&backend_info_ptr);
+-
+     if (((struct backentry *)backend_info_ptr)->ep_vlventry != NULL) {
+         /* This entry was created during a vlv search whose acl check failed.  It needs to be
+          * freed here */
+         slapi_entry_free(((struct backentry *)backend_info_ptr)->ep_vlventry);
+         ((struct backentry *)backend_info_ptr)->ep_vlventry = NULL;
+     }
++    CACHE_RETURN(&inst->inst_cache, (struct backentry **)&backend_info_ptr);
++
+     return 0;
+ }
+-- 
+2.24.1
+
diff --git a/SOURCES/0023-Ticket-49623-cont-cenotaph-errors-on-modrdn-operatio.patch b/SOURCES/0023-Ticket-49623-cont-cenotaph-errors-on-modrdn-operatio.patch
new file mode 100644
index 0000000..54f76b2
--- /dev/null
+++ b/SOURCES/0023-Ticket-49623-cont-cenotaph-errors-on-modrdn-operatio.patch
@@ -0,0 +1,168 @@
+From 819eaedc4bd51d0ec34928c5443a7cd7f094c60a Mon Sep 17 00:00:00 2001
+From: Ludwig Krispenz <lkrispen@redhat.com>
+Date: Tue, 11 Feb 2020 09:47:45 +0100
+Subject: [PATCH 1/2] Ticket - 49623-cont cenotaph errors on modrdn operations
+
+Bug: In modrdn operations cenotaph entries are created to track the time when
+	an entry had existed. But in cases where entries were renamed in cycles,
+	reusing the same dns again and again, this failed with the error: "failed to add cenotaph"
+
+Fix: Previous versions of cenotaphs with the same dn are not used (or only in very unlikely
+	scenarios), so there is no need to change the dn construction to be able to keep all
+	versions of the same cenotaph. Instead, if the creation of the cenotaph fails because
+	it already exists, the existing cenotaph is modified with the lifespan data of the
+	cenotaph that we tried to add.
+
+Reviewed by: Thierry, thanks
+---
+ .../tests/tickets/ticket49623_2_test.py       | 66 +++++++++++++++++++
+ ldap/servers/plugins/replication/urp.c        | 42 ++++++++++--
+ 2 files changed, 104 insertions(+), 4 deletions(-)
+ create mode 100644 dirsrvtests/tests/tickets/ticket49623_2_test.py
+
+diff --git a/dirsrvtests/tests/tickets/ticket49623_2_test.py b/dirsrvtests/tests/tickets/ticket49623_2_test.py
+new file mode 100644
+index 000000000..1d3167d49
+--- /dev/null
++++ b/dirsrvtests/tests/tickets/ticket49623_2_test.py
+@@ -0,0 +1,66 @@
++# --- BEGIN COPYRIGHT BLOCK ---
++# Copyright (C) 2020 Red Hat, Inc.
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
++# --- END COPYRIGHT BLOCK ---
++#
++import os
++import ldap
++import pytest
++import subprocess
++from lib389.tasks import *
++from lib389.utils import *
++from lib389.topologies import topology_m1
++from lib389.idm.user import UserAccounts
++from lib389._constants import DEFAULT_SUFFIX
++from contextlib import contextmanager
++
++pytestmark = pytest.mark.tier1
++
++logging.getLogger(__name__).setLevel(logging.DEBUG)
++log = logging.getLogger(__name__)
++
++
++@pytest.mark.ds49623
++@pytest.mark.bz1790986
++def test_modrdn_loop(topology_m1):
++    """Test that renaming the same entry multiple times reusing the same
++       RDN multiple times does not result in cenotaph error messages
++
++    :id: 631b2be9-5c03-44c7-9853-a87c923d5b30
++
++    :setup: Single master instance
++
++    :steps: 1. Add an entry with the RDN 'start'
++            2. Rename the entry to the RDN 'change'
++            3. Rename the entry back to 'start'
++            4. Rename the entry to 'change' again
++            5. Check for cenotaph error messages
++    :expectedresults:
++            1. No error messages
++    """
++
++    topo = topology_m1.ms['master1']
++    TEST_ENTRY_RDN_START = 'start'
++    TEST_ENTRY_RDN_CHANGE = 'change'
++    TEST_ENTRY_NAME = 'tuser'
++    users = UserAccounts(topo, DEFAULT_SUFFIX)
++    user_properties = {
++        'uid': TEST_ENTRY_RDN_START,
++        'cn': TEST_ENTRY_NAME,
++        'sn': TEST_ENTRY_NAME,
++        'uidNumber': '1001',
++        'gidNumber': '2001',
++        'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME)
++    }
++
++    tuser = users.create(properties=user_properties)
++    tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True)
++    tuser.rename('uid={}'.format(TEST_ENTRY_RDN_START), newsuperior=None, deloldrdn=True)
++    tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True)
++
++    log.info("Check the log messages for cenotaph error")
++    error_msg = ".*urp_fixup_add_cenotaph - failed to add cenotaph, err= 68"
++    assert not topo.ds_error_log.match(error_msg)
+diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
+index 37fe77379..d71e1fa63 100644
+--- a/ldap/servers/plugins/replication/urp.c
++++ b/ldap/servers/plugins/replication/urp.c
+@@ -854,7 +854,7 @@ urp_post_delete_operation(Slapi_PBlock *pb)
+ }
+ 
+ static int
+-urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
++urp_fixup_add_cenotaph(Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
+ {
+     Slapi_PBlock *add_pb;
+     Slapi_Entry *cenotaph = NULL;
+@@ -892,7 +892,7 @@ urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
+     /* slapi_sdn_free(&pre_sdn); */
+ 
+     cenotaph = slapi_entry_alloc();
+-    slapi_entry_init(cenotaph, newdn, NULL);
++    slapi_entry_init(cenotaph, slapi_ch_strdup(newdn), NULL);
+ 
+     dncsn = (CSN *)entry_get_dncsn (pre_entry);
+     slapi_entry_add_string(cenotaph, SLAPI_ATTR_OBJECTCLASS, "extensibleobject");
+@@ -914,12 +914,46 @@ urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
+                                     OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY|SLAPI_OP_FLAG_BYPASS_REFERRALS);
+     slapi_add_internal_pb(add_pb);
+     slapi_pblock_get(add_pb, SLAPI_PLUGIN_INTOP_RESULT, &ret);
++    slapi_pblock_destroy(add_pb);
++
++    if (ret == LDAP_ALREADY_EXISTS) {
++        /* the cenotaph already exists, probably because of a loop
++         * in renaming entries. Update it with new csns
++         */
++        slapi_log_err(SLAPI_LOG_REPL, sessionid,
++                       "urp_fixup_add_cenotaph - cenotaph (%s) already exists, updating\n", newdn);
++        Slapi_PBlock *mod_pb = slapi_pblock_new();
++        Slapi_Mods smods;
++        Slapi_DN *sdn = slapi_sdn_new_dn_byval(newdn);
++        slapi_mods_init(&smods, 4);
++        slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "cenotaphfrom", csn_as_string(dncsn, PR_FALSE, csnstr));
++        slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "cenotaphto", csn_as_string(opcsn, PR_FALSE, csnstr));
++        slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "nstombstonecsn", csn_as_string(opcsn, PR_FALSE, csnstr));
++
++        slapi_modify_internal_set_pb_ext(
++            mod_pb,
++            sdn,
++            slapi_mods_get_ldapmods_byref(&smods),
++            NULL, /* Controls */
++            NULL,
++            repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION),
++            OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY|SLAPI_OP_FLAG_BYPASS_REFERRALS);
++
++        slapi_modify_internal_pb(mod_pb);
++        slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &ret);
++        if (ret != LDAP_SUCCESS) {
++            slapi_log_err(SLAPI_LOG_ERR, sessionid,
++                       "urp_fixup_add_cenotaph - failed to modify cenotaph, err= %d\n", ret);
++        }
++        slapi_mods_done(&smods);
++        slapi_sdn_free(&sdn);
++        slapi_pblock_destroy(mod_pb);
+ 
+-    if (ret != LDAP_SUCCESS) {
++    } else if (ret != LDAP_SUCCESS) {
+         slapi_log_err(SLAPI_LOG_ERR, sessionid,
+                        "urp_fixup_add_cenotaph - failed to add cenotaph, err= %d\n", ret);
+     }
+-    slapi_pblock_destroy(add_pb);
++    slapi_ch_free_string(&newdn);
+ 
+     return ret;
+ }
+-- 
+2.25.1
+
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index f131749..d718cc4 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -39,7 +39,7 @@
 Summary:          389 Directory Server (%{variant})
 Name:             389-ds-base
 Version:          1.3.10.1
-Release:          %{?relprefix}5%{?prerel}%{?dist}
+Release:          %{?relprefix}9%{?prerel}%{?dist}
 License:          GPLv3+
 URL:              https://www.port389.org/
 Group:            System Environment/Daemons
@@ -161,6 +161,14 @@ Patch12:          0012-Issue-50536-Audit-log-heading-written-to-log-after-e.patc
 Patch13:          0013-Issue-50636-Crash-during-sasl-bind.patch
 Patch14:          0014-Ticket-49850-cont-fix-crash-in-ldbm_non_leaf.patch
 Patch15:          0015-Ticket-49624-cont-DB-Deadlock-on-modrdn-appears-to-c.patch
+Patch16:          0016-Ticket-50542-Entry-cache-contention-during-base-sear.patch
+Patch17:          0017-Issue-50834-Incorrectly-setting-the-NSS-default-SSL-.patch
+Patch18:          0018-Ticket-50736-RetroCL-trimming-may-crash-at-shutdown-.patch
+Patch19:          0019-Ticket-50709-Several-memory-leaks-reported-by-Valgri.patch
+Patch20:          0020-Ticket-50857-Memory-leak-in-ACI-using-IP-subject.patch
+Patch21:          0021-Ticket-50709-cont-Several-memory-leaks-reported-by-V.patch
+Patch22:          0022-fix-for-50542-crashes-in-filter-tests.patch
+Patch23:          0023-Ticket-49623-cont-cenotaph-errors-on-modrdn-operatio.patch
 
 %description
 389 Directory Server is an LDAPv3 compliant server.  The base package includes
@@ -513,6 +521,26 @@ fi
 %{_sysconfdir}/%{pkgname}/dirsrvtests
 
 %changelog
+* Mon Apr 6 2020 Mark Reynolds <mreynolds@redhat.com> - 1.3.10.1-9
+- Bump version to 1.3.10.1-9
+- Resolves: Bug 1817933 - cenotaph errors on modrdn operations
+
+* Fri Mar 6 2020 Mark Reynolds <mreynolds@redhat.com> - 1.3.10.1-8
+- Bump version to 1.3.10.1-8
+- Resolves: Bug 1809160 - Entry cache contention during base search (Fix crash - part 2)
+
+* Mon Mar 2 2020 Mark Reynolds <mreynolds@redhat.com> - 1.3.10.1-7
+- Bump version to 1.3.10.1-7
+- Resolves: Bug 1803023 - Several memory leaks reported by Valgrind (fix regression)
+
+* Mon Mar 2 2020 Mark Reynolds <mreynolds@redhat.com> - 1.3.10.1-6
+- Bump version to 1.3.10.1-6
+- Resolves: Bug 1801694 - ns-slapd is crashing while restarting ipactl
+- Resolves: Bug 1803023 - Several memory leaks reported by Valgrind for 389-ds 1.3.9.1-10
+- Resolves: Bug 1803052 - Memory leak in ACI using IP subject
+- Resolves: Bug 1801703 - Regression: NSS has interop problems as server when using limited cipher list
+- Resolves: Bug 1809160 - Entry cache contention during base search
+
 * Fri Feb 7 2020 Mark Reynolds <mreynolds@redhat.com> - 1.3.10.1-5
 - Bump version to 1.3.10.1-5
 - Resolves: Bug 1744623 - DB Deadlock on modrdn appears to corrupt database and entry cache(cont)