diff --git a/SOURCES/0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch b/SOURCES/0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch
new file mode 100644
index 0000000..cba92a9
--- /dev/null
+++ b/SOURCES/0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch
@@ -0,0 +1,43 @@
+From 97ecf0190f264a2d87750bc2d26ebf011542e3e1 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 8 May 2020 10:52:43 -0400
+Subject: [PATCH 01/12] Issue 51076 - prevent unnecessary duplication of the
+ target entry
+
+Bug Description:  For any update operation the MEP plugin was calling
+                  slapi_search_internal_get_entry(), which duplicates
+                  the entry it returns.  In this case the entry is only
+                  read and then discarded, but the same entry is already
+                  in the pblock (the PRE OP ENTRY).
+
+Fix Description:  Just grab the PRE OP ENTRY from the pblock and read
+                  the attribute values from it.  This saves
+                  two entry duplications for every update operation
+                  in MEP.
+
+fixes:  https://pagure.io/389-ds-base/issue/51076
+
+Reviewed by: tbordaz & firstyear(Thanks!!)
+---
+ ldap/servers/plugins/mep/mep.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c
+index ca9a64b3b..401d95e3a 100644
+--- a/ldap/servers/plugins/mep/mep.c
++++ b/ldap/servers/plugins/mep/mep.c
+@@ -2165,9 +2165,8 @@ mep_pre_op(Slapi_PBlock *pb, int modop)
+                 if (e && free_entry) {
+                     slapi_entry_free(e);
+                 }
+-
+-                slapi_search_internal_get_entry(sdn, 0, &e, mep_get_plugin_id());
+-                free_entry = 1;
++                slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e);
++                free_entry = 0;
+             }
+ 
+             if (e && mep_is_managed_entry(e)) {
+-- 
+2.26.2
+
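A minimal sketch of the pre-op pattern this patch switches to: reading the
already-fetched target entry out of the pblock instead of issuing a second
internal search. The handler and the attribute read here are illustrative
only (not the actual mep.c code); slapi_pblock_get(), SLAPI_ENTRY_PRE_OP,
slapi_entry_attr_get_charptr() and slapi_ch_free_string() are the standard
slapi calls used by this patch and elsewhere in the series.

#include "slapi-plugin.h"

static int
example_modify_pre_op(Slapi_PBlock *pb)
{
    Slapi_Entry *e = NULL;
    char *val = NULL;

    /* For a modify pre-op the server has already fetched the target
     * entry; borrow it from the pblock instead of searching again. */
    slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e);
    if (e == NULL) {
        return 0; /* nothing to inspect, let the server handle the op */
    }

    /* Read what we need.  The entry is owned by the pblock, so it must
     * not be freed here - hence free_entry = 0 in the hunk above. */
    val = slapi_entry_attr_get_charptr(e, "cn");
    /* ... use val ... */
    slapi_ch_free_string(&val);
    return 0;
}
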
diff --git a/SOURCES/0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch b/SOURCES/0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch
new file mode 100644
index 0000000..822c8d2
--- /dev/null
+++ b/SOURCES/0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch
@@ -0,0 +1,116 @@
+From 1426f086623404ab2eacb04de7e6414177c0993a Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Mon, 11 May 2020 17:11:49 +0200
+Subject: [PATCH 02/12] Ticket 51082 - abort when an empty valueset is freed
+
+Bug Description:
+        A large valueset (more than 10 values) manages a sorted array of values.
+        Replication purges old values from a valueset (valueset_array_purge). If it purges all the
+        values, the valueset is freed (slapi_valueset_done).
+        The problem is that the value counter in the valueset still reflects the initial number
+        of values (before the purge). When the now-empty valueset is freed, a safety check
+        detects incoherent values based on the stale counter.
+
+Fix Description:
+        When all the values have been purged, reset the counter before freeing the valueset.
+
+https://pagure.io/389-ds-base/issue/51082
+
+Reviewed by: Mark Reynolds
+
+Platforms tested: F30
+
+Flag Day: no
+
+Doc impact: no
+---
+ .../suites/replication/acceptance_test.py     | 57 +++++++++++++++++++
+ ldap/servers/slapd/valueset.c                 |  4 ++
+ 2 files changed, 61 insertions(+)
+
+diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
+index c8e0a4c93..5009f4e7c 100644
+--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
++++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
+@@ -500,6 +500,63 @@ def test_warining_for_invalid_replica(topo_m4):
+     assert topo_m4.ms["master1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*')
+ 
+ 
++@pytest.mark.ds51082
++def test_csnpurge_large_valueset(topo_m2):
++    """Test csn generator test
++
++    :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74
++    :setup: MMR with 2 masters
++    :steps:
++        1. Create a test_user
++        2. Add a large set of values (more than 10)
++        3. Delete all the values (more than 10)
++        4. Configure the replica to purge those values (purgedelay=5s)
++        5. Wait for 6 seconds
++        6. Do a series of updates
++    :expectedresults:
++        1. Should succeed
++        2. Should succeed
++        3. Should succeed
++        4. Should succeed
++        5. Should succeed
++        6. Should not crash
++    """
++    m1 = topo_m2.ms["master2"]
++
++    test_user = UserAccount(m1, TEST_ENTRY_DN)
++    if test_user.exists():
++        log.info('Deleting entry {}'.format(TEST_ENTRY_DN))
++        test_user.delete()
++    test_user.create(properties={
++        'uid': TEST_ENTRY_NAME,
++        'cn': TEST_ENTRY_NAME,
++        'sn': TEST_ENTRY_NAME,
++        'userPassword': TEST_ENTRY_NAME,
++        'uidNumber' : '1000',
++        'gidNumber' : '2000',
++        'homeDirectory' : '/home/mmrepl_test',
++    })
++
++    # create a large value set so that it is sorted
++    for i in range(1,20):
++        test_user.add('description', 'value {}'.format(str(i)))
++
++    # delete all values of the valueset
++    for i in range(1,20):
++        test_user.remove('description', 'value {}'.format(str(i)))
++
++    # set purging delay to 5 seconds and wait more than 5 seconds
++    replicas = Replicas(m1)
++    replica = replicas.list()[0]
++    log.info('nsds5ReplicaPurgeDelay to 5')
++    replica.set('nsds5ReplicaPurgeDelay', '5')
++    time.sleep(6)
++
++    # add some new values to the valueset containing entries that should be purged
++    for i in range(21,25):
++        test_user.add('description', 'value {}'.format(str(i)))
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
+index 2af3ee18d..12027ecb8 100644
+--- a/ldap/servers/slapd/valueset.c
++++ b/ldap/servers/slapd/valueset.c
+@@ -801,6 +801,10 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn)
+             }
+         }
+     } else {
++        /* empty valueset - reset the vs->num so that further
++         * checking will not abort
++         */
++        vs->num = 0;
+         slapi_valueset_done(vs);
+     }
+ 
+-- 
+2.26.2
+
diff --git a/SOURCES/0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch b/SOURCES/0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch
new file mode 100644
index 0000000..b3a1a82
--- /dev/null
+++ b/SOURCES/0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch
@@ -0,0 +1,45 @@
+From 7a62e72b81d75ebb844835619ecc97dbf5e21058 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Thu, 14 May 2020 09:38:20 -0400
+Subject: [PATCH 03/12] Issue 51091 - healthcheck json report fails when
+ mapping tree is deleted
+
+Description:  We were passing the bename as bytes and not as a utf-8 string.
+              This caused the JSON dumping to fail.
+
+relates: https://pagure.io/389-ds-base/issue/51091
+
+Reviewed by: firstyear(Thanks!)
+---
+ src/lib389/lib389/backend.py | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
+index e472d3de5..4f752f414 100644
+--- a/src/lib389/lib389/backend.py
++++ b/src/lib389/lib389/backend.py
+@@ -11,7 +11,7 @@ import copy
+ import ldap
+ from lib389._constants import *
+ from lib389.properties import *
+-from lib389.utils import normalizeDN, ensure_str, ensure_bytes,  assert_c
++from lib389.utils import normalizeDN, ensure_str, assert_c
+ from lib389 import Entry
+ 
+ # Need to fix this ....
+@@ -488,10 +488,10 @@ class Backend(DSLdapObject):
+ 
+         # Check for the missing mapping tree.
+         suffix = self.get_attr_val_utf8('nsslapd-suffix')
+-        bename = self.get_attr_val_bytes('cn')
++        bename = self.get_attr_val_utf8('cn')
+         try:
+             mt = self._mts.get(suffix)
+-            if mt.get_attr_val_bytes('nsslapd-backend') != bename and mt.get_attr_val('nsslapd-state') != ensure_bytes('backend'):
++            if mt.get_attr_val_utf8('nsslapd-backend') != bename and mt.get_attr_val_utf8('nsslapd-state') != 'backend':
+                 raise ldap.NO_SUCH_OBJECT("We have a matching suffix, but not a backend or correct database name.")
+         except ldap.NO_SUCH_OBJECT:
+             result = DSBLE0001
+-- 
+2.26.2
+
diff --git a/SOURCES/0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch b/SOURCES/0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch
new file mode 100644
index 0000000..c931a79
--- /dev/null
+++ b/SOURCES/0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch
@@ -0,0 +1,943 @@
+From f13d630ff98eb5b5505f1db3e7f207175b51b237 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Tue, 12 May 2020 13:48:30 -0400
+Subject: [PATCH 04/12] Issue 51076 - remove unnecessary slapi entry dups
+
+Description:  The problem is that slapi_search_internal_get_entry()
+              duplicates the entry twice.  It does that as a convenience:
+              it allocates a pblock, does the search, copies the
+              entry, frees the search results from the pblock, and then
+              frees the pblock itself.  I basically split this function
+              into two functions.  One function allocates the pblock,
+              does the search, and returns the entry.  The other function
+              frees the entries and the pblock.
+
+              99% of the time when we call slapi_search_internal_get_entry()
+              we are just reading the entry and freeing it.  It is not being
+              consumed.  In these cases the two-function approach
+              eliminates an extra slapi_entry_dup().  Over the
+              lifetime of an operation/connection we can save quite a bit
+              of mallocing/freeing.  This could also help with memory
+              fragmentation.
+
+ASAN: passed
+
+relates: https://pagure.io/389-ds-base/issue/51076
+
+Reviewed by: firstyear & tbordaz(Thanks!)
+---
+ ldap/servers/plugins/acctpolicy/acct_config.c |  6 +--
+ ldap/servers/plugins/acctpolicy/acct_plugin.c | 36 +++++++-------
+ ldap/servers/plugins/acctpolicy/acct_util.c   |  6 +--
+ ldap/servers/plugins/automember/automember.c  | 17 +++----
+ ldap/servers/plugins/dna/dna.c                | 23 ++++-----
+ ldap/servers/plugins/memberof/memberof.c      | 16 +++----
+ .../plugins/pam_passthru/pam_ptconfig.c       | 10 ++--
+ .../servers/plugins/pam_passthru/pam_ptimpl.c |  7 +--
+ .../plugins/pam_passthru/pam_ptpreop.c        |  9 ++--
+ .../plugins/replication/repl5_tot_protocol.c  |  5 +-
+ ldap/servers/plugins/uiduniq/uid.c            | 23 ++++-----
+ ldap/servers/slapd/daemon.c                   | 11 ++---
+ ldap/servers/slapd/modify.c                   | 12 +++--
+ ldap/servers/slapd/plugin_internal_op.c       | 48 +++++++++++++++++++
+ ldap/servers/slapd/resourcelimit.c            | 13 ++---
+ ldap/servers/slapd/schema.c                   |  7 ++-
+ ldap/servers/slapd/slapi-plugin.h             | 23 ++++++++-
+ 17 files changed, 161 insertions(+), 111 deletions(-)
+
+diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c
+index fe35ba5a0..01e4f319f 100644
+--- a/ldap/servers/plugins/acctpolicy/acct_config.c
++++ b/ldap/servers/plugins/acctpolicy/acct_config.c
+@@ -37,6 +37,7 @@ static int acct_policy_entry2config(Slapi_Entry *e,
+ int
+ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *plugin_id)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     acctPluginCfg *newcfg;
+     Slapi_Entry *config_entry = NULL;
+     Slapi_DN *config_sdn = NULL;
+@@ -44,8 +45,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
+ 
+     /* Retrieve the config entry */
+     config_sdn = slapi_sdn_new_normdn_byref(PLUGIN_CONFIG_DN);
+-    rc = slapi_search_internal_get_entry(config_sdn, NULL, &config_entry,
+-                                         plugin_id);
++    rc = slapi_search_get_entry(&entry_pb, config_sdn, NULL, &config_entry, plugin_id);
+     slapi_sdn_free(&config_sdn);
+ 
+     if (rc != LDAP_SUCCESS || config_entry == NULL) {
+@@ -60,7 +60,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
+     rc = acct_policy_entry2config(config_entry, newcfg);
+     config_unlock();
+ 
+-    slapi_entry_free(config_entry);
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     return (rc);
+ }
+diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
+index 2a876ad72..c3c32b074 100644
+--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
++++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
+@@ -209,6 +209,7 @@ done:
+ int
+ acct_bind_preop(Slapi_PBlock *pb)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     const char *dn = NULL;
+     Slapi_DN *sdn = NULL;
+     Slapi_Entry *target_entry = NULL;
+@@ -236,8 +237,7 @@ acct_bind_preop(Slapi_PBlock *pb)
+         goto done;
+     }
+ 
+-    ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
+-                                           plugin_id);
++    ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
+ 
+     /* There was a problem retrieving the entry */
+     if (ldrc != LDAP_SUCCESS) {
+@@ -275,7 +275,7 @@ done:
+         slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
+     }
+ 
+-    slapi_entry_free(target_entry);
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     free_acctpolicy(&policy);
+ 
+@@ -293,6 +293,7 @@ done:
+ int
+ acct_bind_postop(Slapi_PBlock *pb)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     char *dn = NULL;
+     int ldrc, tracklogin = 0;
+     int rc = 0; /* Optimistic default */
+@@ -327,8 +328,7 @@ acct_bind_postop(Slapi_PBlock *pb)
+        covered by an account policy to decide whether we should track */
+     if (tracklogin == 0) {
+         sdn = slapi_sdn_new_normdn_byref(dn);
+-        ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
+-                                               plugin_id);
++        ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
+ 
+         if (ldrc != LDAP_SUCCESS) {
+             slapi_log_err(SLAPI_LOG_ERR, POST_PLUGIN_NAME,
+@@ -355,7 +355,7 @@ done:
+         slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
+     }
+ 
+-    slapi_entry_free(target_entry);
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     slapi_sdn_free(&sdn);
+ 
+@@ -370,11 +370,11 @@ done:
+ static int
+ acct_pre_op(Slapi_PBlock *pb, int modop)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     Slapi_DN *sdn = 0;
+     Slapi_Entry *e = 0;
+     Slapi_Mods *smods = 0;
+     LDAPMod **mods;
+-    int free_entry = 0;
+     char *errstr = NULL;
+     int ret = SLAPI_PLUGIN_SUCCESS;
+ 
+@@ -384,28 +384,25 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
+ 
+     if (acct_policy_dn_is_config(sdn)) {
+         /* Validate config changes, but don't apply them.
+-     * This allows us to reject invalid config changes
+-     * here at the pre-op stage.  Applying the config
+-     * needs to be done at the post-op stage. */
++         * This allows us to reject invalid config changes
++         * here at the pre-op stage.  Applying the config
++         * needs to be done at the post-op stage. */
+ 
+         if (LDAP_CHANGETYPE_ADD == modop) {
+             slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
+ 
+-            /* If the entry doesn't exist, just bail and
+-     * let the server handle it. */
++            /* If the entry doesn't exist, just bail and let the server handle it. */
+             if (e == NULL) {
+                 goto bail;
+             }
+         } else if (LDAP_CHANGETYPE_MODIFY == modop) {
+             /* Fetch the entry being modified so we can
+-     * create the resulting entry for validation. */
++             * create the resulting entry for validation. */
+             if (sdn) {
+-                slapi_search_internal_get_entry(sdn, 0, &e, get_identity());
+-                free_entry = 1;
++                slapi_search_get_entry(&entry_pb, sdn, 0, &e, get_identity());
+             }
+ 
+-            /* If the entry doesn't exist, just bail and
+-     * let the server handle it. */
++            /* If the entry doesn't exist, just bail and let the server handle it. */
+             if (e == NULL) {
+                 goto bail;
+             }
+@@ -418,7 +415,7 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
+             /* Apply the  mods to create the resulting entry. */
+             if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
+                 /* The mods don't apply cleanly, so we just let this op go
+-     * to let the main server handle it. */
++                 * to let the main server handle it. */
+                 goto bailmod;
+             }
+         } else if (modop == LDAP_CHANGETYPE_DELETE) {
+@@ -439,8 +436,7 @@ bailmod:
+     }
+ 
+ bail:
+-    if (free_entry && e)
+-        slapi_entry_free(e);
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     if (ret) {
+         slapi_log_err(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME,
+diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
+index f25a3202d..f432092fe 100644
+--- a/ldap/servers/plugins/acctpolicy/acct_util.c
++++ b/ldap/servers/plugins/acctpolicy/acct_util.c
+@@ -85,6 +85,7 @@ get_attr_string_val(Slapi_Entry *target_entry, char *attr_name)
+ int
+ get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_entry, void *plugin_id, acctPolicy **policy)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     Slapi_DN *sdn = NULL;
+     Slapi_Entry *policy_entry = NULL;
+     Slapi_Attr *attr;
+@@ -123,8 +124,7 @@ get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_ent
+     }
+ 
+     sdn = slapi_sdn_new_dn_byref(policy_dn);
+-    ldrc = slapi_search_internal_get_entry(sdn, NULL, &policy_entry,
+-                                           plugin_id);
++    ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &policy_entry, plugin_id);
+     slapi_sdn_free(&sdn);
+ 
+     /* There should be a policy but it can't be retrieved; fatal error */
+@@ -160,7 +160,7 @@ dopolicy:
+ done:
+     config_unlock();
+     slapi_ch_free_string(&policy_dn);
+-    slapi_entry_free(policy_entry);
++    slapi_search_get_entry_done(&entry_pb);
+     return (rc);
+ }
+ 
+diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
+index 7c875c852..39350ad53 100644
+--- a/ldap/servers/plugins/automember/automember.c
++++ b/ldap/servers/plugins/automember/automember.c
+@@ -1629,13 +1629,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
+     char *member_value = NULL;
+     int rc = 0;
+     Slapi_DN *group_sdn;
+-    Slapi_Entry *group_entry = NULL;
+ 
+     /* First thing check that the group still exists */
+     group_sdn = slapi_sdn_new_dn_byval(group_dn);
+-    rc = slapi_search_internal_get_entry(group_sdn, NULL, &group_entry, automember_get_plugin_id());
++    rc = slapi_search_internal_get_entry(group_sdn, NULL, NULL, automember_get_plugin_id());
+     slapi_sdn_free(&group_sdn);
+-    if (rc != LDAP_SUCCESS || group_entry == NULL) {
++    if (rc != LDAP_SUCCESS) {
+         if (rc == LDAP_NO_SUCH_OBJECT) {
+             /* the automember group (default or target) does not exist, just skip this definition */
+             slapi_log_err(SLAPI_LOG_INFO, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+@@ -1647,10 +1646,8 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
+                       "automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
+                       group_dn, rc);
+         }
+-        slapi_entry_free(group_entry);
+         return rc;
+     }
+-    slapi_entry_free(group_entry);
+ 
+     /* If grouping_value is dn, we need to fetch the dn instead. */
+     if (slapi_attr_type_cmp(grouping_value, "dn", SLAPI_TYPE_CMP_EXACT) == 0) {
+@@ -1752,11 +1749,11 @@ out:
+ static int
+ automember_pre_op(Slapi_PBlock *pb, int modop)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     Slapi_DN *sdn = 0;
+     Slapi_Entry *e = 0;
+     Slapi_Mods *smods = 0;
+     LDAPMod **mods;
+-    int free_entry = 0;
+     char *errstr = NULL;
+     int ret = SLAPI_PLUGIN_SUCCESS;
+ 
+@@ -1784,8 +1781,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
+             /* Fetch the entry being modified so we can
+              * create the resulting entry for validation. */
+             if (sdn) {
+-                slapi_search_internal_get_entry(sdn, 0, &e, automember_get_plugin_id());
+-                free_entry = 1;
++                slapi_search_get_entry(&entry_pb, sdn, 0, &e, automember_get_plugin_id());
+             }
+ 
+             /* If the entry doesn't exist, just bail and
+@@ -1799,7 +1795,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
+             smods = slapi_mods_new();
+             slapi_mods_init_byref(smods, mods);
+ 
+-            /* Apply the  mods to create the resulting entry. */
++            /* Apply the mods to create the resulting entry. */
+             if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
+                 /* The mods don't apply cleanly, so we just let this op go
+                  * to let the main server handle it. */
+@@ -1831,8 +1827,7 @@ bailmod:
+     }
+ 
+ bail:
+-    if (free_entry && e)
+-        slapi_entry_free(e);
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     if (ret) {
+         slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
+index 1ee271359..16c625bb0 100644
+--- a/ldap/servers/plugins/dna/dna.c
++++ b/ldap/servers/plugins/dna/dna.c
+@@ -1178,7 +1178,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
+ 
+     value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN);
+     if (value) {
+-        Slapi_Entry *shared_e = NULL;
+         Slapi_DN *sdn = NULL;
+         char *normdn = NULL;
+         char *attrs[2];
+@@ -1197,10 +1196,8 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
+         /* We don't need attributes */
+         attrs[0] = "cn";
+         attrs[1] = NULL;
+-        slapi_search_internal_get_entry(sdn, attrs, &shared_e, getPluginID());
+-
+         /* Make sure that the shared config entry exists. */
+-        if (!shared_e) {
++        if (slapi_search_internal_get_entry(sdn, attrs, NULL, getPluginID()) != LDAP_SUCCESS) {
+             /* We didn't locate the shared config container entry. Log
+              * a message and skip this config entry. */
+             slapi_log_err(SLAPI_LOG_ERR, DNA_PLUGIN_SUBSYSTEM,
+@@ -1210,9 +1207,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
+             ret = DNA_FAILURE;
+             slapi_sdn_free(&sdn);
+             goto bail;
+-        } else {
+-            slapi_entry_free(shared_e);
+-            shared_e = NULL;
+         }
+ 
+         normdn = (char *)slapi_sdn_get_dn(sdn);
+@@ -1539,6 +1533,7 @@ dna_delete_shared_servers(PRCList **servers)
+ static int
+ dna_load_host_port(void)
+ {
++    Slapi_PBlock *pb = NULL;
+     int status = DNA_SUCCESS;
+     Slapi_Entry *e = NULL;
+     Slapi_DN *config_dn = NULL;
+@@ -1554,7 +1549,7 @@ dna_load_host_port(void)
+ 
+     config_dn = slapi_sdn_new_ndn_byref("cn=config");
+     if (config_dn) {
+-        slapi_search_internal_get_entry(config_dn, attrs, &e, getPluginID());
++        slapi_search_get_entry(&pb, config_dn, attrs, &e, getPluginID());
+         slapi_sdn_free(&config_dn);
+     }
+ 
+@@ -1562,8 +1557,8 @@ dna_load_host_port(void)
+         hostname = slapi_entry_attr_get_charptr(e, "nsslapd-localhost");
+         portnum = slapi_entry_attr_get_charptr(e, "nsslapd-port");
+         secureportnum = slapi_entry_attr_get_charptr(e, "nsslapd-secureport");
+-        slapi_entry_free(e);
+     }
++    slapi_search_get_entry_done(&pb);
+ 
+     if (!hostname || !portnum) {
+         status = DNA_FAILURE;
+@@ -2876,6 +2871,7 @@ bail:
+ static int
+ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     char *replica_dn = NULL;
+     Slapi_DN *replica_sdn = NULL;
+     Slapi_DN *range_sdn = NULL;
+@@ -2912,8 +2908,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
+         attrs[2] = 0;
+ 
+         /* Find cn=replica entry via search */
+-        slapi_search_internal_get_entry(replica_sdn, attrs, &e, getPluginID());
+-
++        slapi_search_get_entry(&entry_pb, replica_sdn, attrs, &e, getPluginID());
+         if (e) {
+             /* Check if the passed in bind dn matches any of the replica bind dns. */
+             Slapi_Value *bind_dn_sv = slapi_value_new_string(bind_dn);
+@@ -2927,6 +2922,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
+                 attrs[0] = "member";
+                 attrs[1] = "uniquemember";
+                 attrs[2] = 0;
++                slapi_search_get_entry_done(&entry_pb);
+                 for (i = 0; bind_group_dn != NULL && bind_group_dn[i] != NULL; i++) {
+                     if (ret) {
+                         /* already found a member, just free group */
+@@ -2934,14 +2930,14 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
+                         continue;
+                     }
+                     bind_group_sdn = slapi_sdn_new_normdn_passin(bind_group_dn[i]);
+-                    slapi_search_internal_get_entry(bind_group_sdn, attrs, &bind_group_entry, getPluginID());
++                    slapi_search_get_entry(&entry_pb, bind_group_sdn, attrs, &bind_group_entry, getPluginID());
+                     if (bind_group_entry) {
+                         ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "member", bind_dn_sv);
+                         if (ret == 0) {
+                             ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "uniquemember", bind_dn_sv);
+                         }
+                     }
+-                    slapi_entry_free(bind_group_entry);
++                    slapi_search_get_entry_done(&entry_pb);
+                     slapi_sdn_free(&bind_group_sdn);
+                 }
+                 slapi_ch_free((void **)&bind_group_dn);
+@@ -2956,7 +2952,6 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
+     }
+ 
+ done:
+-    slapi_entry_free(e);
+     slapi_sdn_free(&range_sdn);
+     slapi_sdn_free(&replica_sdn);
+ 
+diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
+index 40bd4b380..e9e1ec4c7 100644
+--- a/ldap/servers/plugins/memberof/memberof.c
++++ b/ldap/servers/plugins/memberof/memberof.c
+@@ -884,7 +884,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb)
+             pre_sdn = slapi_entry_get_sdn(pre_e);
+             post_sdn = slapi_entry_get_sdn(post_e);
+         }
+-        
++
+         if (pre_sdn && post_sdn && slapi_sdn_compare(pre_sdn, post_sdn) == 0) {
+             /* Regarding memberof plugin, this rename is a no-op
+              * but it can be expensive to process it. So skip it
+@@ -1466,6 +1466,7 @@ memberof_modop_one_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi
+ int
+ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn, Slapi_DN *replace_with_sdn, Slapi_DN *op_to_sdn, memberofstringll *stack)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     int rc = 0;
+     LDAPMod mod;
+     LDAPMod replace_mod;
+@@ -1515,8 +1516,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
+     }
+ 
+     /* determine if this is a group op or single entry */
+-    slapi_search_internal_get_entry(op_to_sdn, config->groupattrs,
+-                                    &e, memberof_get_plugin_id());
++    slapi_search_get_entry(&entry_pb, op_to_sdn, config->groupattrs, &e, memberof_get_plugin_id());
+     if (!e) {
+         /* In the case of a delete, we need to worry about the
+          * missing entry being a nested group.  There's a small
+@@ -1751,7 +1751,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
+ bail:
+     slapi_value_free(&to_dn_val);
+     slapi_value_free(&this_dn_val);
+-    slapi_entry_free(e);
++    slapi_search_get_entry_done(&entry_pb);
+     return rc;
+ }
+ 
+@@ -2368,6 +2368,7 @@ bail:
+ int
+ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Value *memberdn)
+ {
++    Slapi_PBlock *pb = NULL;
+     int rc = 0;
+     Slapi_DN *sdn = 0;
+     Slapi_Entry *group_e = 0;
+@@ -2376,8 +2377,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
+ 
+     sdn = slapi_sdn_new_normdn_byref(slapi_value_get_string(groupdn));
+ 
+-    slapi_search_internal_get_entry(sdn, config->groupattrs,
+-                                    &group_e, memberof_get_plugin_id());
++    slapi_search_get_entry(&pb, sdn, config->groupattrs,
++                           &group_e, memberof_get_plugin_id());
+ 
+     if (group_e) {
+         /* See if memberdn is referred to by any of the group attributes. */
+@@ -2388,9 +2389,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
+                 break;
+             }
+         }
+-
+-        slapi_entry_free(group_e);
+     }
++    slapi_search_get_entry_done(&pb);
+ 
+     slapi_sdn_free(&sdn);
+     return rc;
+diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
+index 46a76d884..cbec2ec40 100644
+--- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
++++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
+@@ -749,22 +749,22 @@ pam_passthru_get_config(Slapi_DN *bind_sdn)
+             if (pam_passthru_check_suffix(cfg, bind_sdn) == LDAP_SUCCESS) {
+                 if (cfg->slapi_filter) {
+                     /* A filter is configured, so see if the bind entry is a match. */
++                    Slapi_PBlock *entry_pb = NULL;
+                     Slapi_Entry *test_e = NULL;
+ 
+                     /* Fetch the bind entry */
+-                    slapi_search_internal_get_entry(bind_sdn, NULL, &test_e,
+-                                                    pam_passthruauth_get_plugin_identity());
++                    slapi_search_get_entry(&entry_pb, bind_sdn, NULL, &test_e,
++                                           pam_passthruauth_get_plugin_identity());
+ 
+                     /* If the entry doesn't exist, just fall through to the main server code */
+                     if (test_e) {
+                         /* Evaluate the filter. */
+                         if (LDAP_SUCCESS == slapi_filter_test_simple(test_e, cfg->slapi_filter)) {
+                             /* This is a match. */
+-                            slapi_entry_free(test_e);
++                            slapi_search_get_entry_done(&entry_pb);
+                             goto done;
+                         }
+-
+-                        slapi_entry_free(test_e);
++                        slapi_search_get_entry_done(&entry_pb);
+                     }
+                 } else {
+                     /* There is no filter to check, so this is a match. */
+diff --git a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
+index 7f5fb02c4..5b43f8d1f 100644
+--- a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
++++ b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
+@@ -81,11 +81,12 @@ derive_from_bind_dn(Slapi_PBlock *pb __attribute__((unused)), const Slapi_DN *bi
+ static char *
+ derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_id, char *map_ident_attr, int *locked)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     Slapi_Entry *entry = NULL;
+     char *attrs[] = {NULL, NULL};
+     attrs[0] = map_ident_attr;
+-    int rc = slapi_search_internal_get_entry((Slapi_DN *)bindsdn, attrs, &entry,
+-                                             pam_passthruauth_get_plugin_identity());
++    int32_t rc = slapi_search_get_entry(&entry_pb, (Slapi_DN *)bindsdn, attrs, &entry,
++                                        pam_passthruauth_get_plugin_identity());
+ 
+     if (rc != LDAP_SUCCESS) {
+         slapi_log_err(SLAPI_LOG_ERR, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
+@@ -108,7 +109,7 @@ derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_
+         init_my_str_buf(pam_id, val);
+     }
+ 
+-    slapi_entry_free(entry);
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     return pam_id->str;
+ }
+diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
+index 3d0067531..5bca823ff 100644
+--- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
++++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
+@@ -526,6 +526,7 @@ done:
+ static int
+ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     Slapi_DN *sdn = NULL;
+     Slapi_Entry *e = NULL;
+     LDAPMod **mods;
+@@ -555,8 +556,8 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
+         case LDAP_CHANGETYPE_MODIFY:
+             /* Fetch the entry being modified so we can
+              * create the resulting entry for validation. */
+-            slapi_search_internal_get_entry(sdn, 0, &e,
+-                                            pam_passthruauth_get_plugin_identity());
++            slapi_search_get_entry(&entry_pb, sdn, 0, &e,
++                                   pam_passthruauth_get_plugin_identity());
+ 
+             /* If the entry doesn't exist, just bail and
+              * let the server handle it. */
+@@ -576,9 +577,6 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
+                     /* Don't bail here, as we need to free the entry. */
+                 }
+             }
+-
+-            /* Free the entry. */
+-            slapi_entry_free(e);
+             break;
+         case LDAP_CHANGETYPE_DELETE:
+         case LDAP_CHANGETYPE_MODDN:
+@@ -591,6 +589,7 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
+     }
+ 
+ bail:
++    slapi_search_get_entry_done(&entry_pb);
+     /* If we are refusing the operation, return the result to the client. */
+     if (ret) {
+         slapi_send_ldap_result(pb, ret, NULL, returntext, 0, NULL);
+diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
+index 3b65d6b20..a25839f21 100644
+--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
++++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
+@@ -469,7 +469,8 @@ retry:
+          */
+         /* Get suffix */
+         Slapi_Entry *suffix = NULL;
+-        rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
++        Slapi_PBlock *suffix_pb = NULL;
++        rc = slapi_search_get_entry(&suffix_pb, area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
+         if (rc) {
+             slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "repl5_tot_run -  Unable to "
+                                                            "get the suffix entry \"%s\".\n",
+@@ -517,7 +518,7 @@ retry:
+                                      LDAP_SCOPE_SUBTREE, "(parentid>=1)", NULL, 0, ctrls, NULL,
+                                      repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), OP_FLAG_BULK_IMPORT);
+         cb_data.num_entries = 0UL;
+-        slapi_entry_free(suffix);
++        slapi_search_get_entry_done(&suffix_pb);
+     } else {
+         /* Original total update */
+         /* we need to provide managedsait control so that referral entries can
+diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
+index d7ccf0e07..e69012204 100644
+--- a/ldap/servers/plugins/uiduniq/uid.c
++++ b/ldap/servers/plugins/uiduniq/uid.c
+@@ -1254,6 +1254,7 @@ preop_modify(Slapi_PBlock *pb)
+ static int
+ preop_modrdn(Slapi_PBlock *pb)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     int result = LDAP_SUCCESS;
+     Slapi_Entry *e = NULL;
+     Slapi_Value *sv_requiredObjectClass = NULL;
+@@ -1351,7 +1352,7 @@ preop_modrdn(Slapi_PBlock *pb)
+ 
+     /* Get the entry that is being renamed so we can make a dummy copy
+      * of what it will look like after the rename. */
+-    err = slapi_search_internal_get_entry(sdn, NULL, &e, plugin_identity);
++    err = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_identity);
+     if (err != LDAP_SUCCESS) {
+         result = uid_op_error(35);
+         /* We want to return a no such object error if the target doesn't exist. */
+@@ -1371,24 +1372,24 @@ preop_modrdn(Slapi_PBlock *pb)
+ 
+ 
+     /*
+-         * Check if it has the required object class
+-         */
++     * Check if it has the required object class
++     */
+     if (requiredObjectClass &&
+         !slapi_entry_attr_has_syntax_value(e, SLAPI_ATTR_OBJECTCLASS, sv_requiredObjectClass)) {
+         break;
+     }
+ 
+     /*
+-         * Find any unique attribute data in the new RDN
+-         */
++     * Find any unique attribute data in the new RDN
++     */
+     for (i = 0; attrNames && attrNames[i]; i++) {
+         err = slapi_entry_attr_find(e, attrNames[i], &attr);
+         if (!err) {
+             /*
+-                 * Passed all the requirements - this is an operation we
+-                 * need to enforce uniqueness on. Now find all parent entries
+-                 * with the marker object class, and do a search for each one.
+-                 */
++             * Passed all the requirements - this is an operation we
++             * need to enforce uniqueness on. Now find all parent entries
++             * with the marker object class, and do a search for each one.
++             */
+             if (NULL != markerObjectClass) {
+                 /* Subtree defined by location of marker object class */
+                 result = findSubtreeAndSearch(slapi_entry_get_sdn(e), attrNames, attr, NULL,
+@@ -1407,8 +1408,8 @@ preop_modrdn(Slapi_PBlock *pb)
+     END
+         /* Clean-up */
+         slapi_value_free(&sv_requiredObjectClass);
+-    if (e)
+-        slapi_entry_free(e);
++
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     if (result) {
+         slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name,
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index 65f23363a..a70f40316 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -1916,18 +1916,13 @@ slapd_bind_local_user(Connection *conn)
+             char *root_dn = config_get_ldapi_root_dn();
+ 
+             if (root_dn) {
++                Slapi_PBlock *entry_pb = NULL;
+                 Slapi_DN *edn = slapi_sdn_new_dn_byref(
+                     slapi_dn_normalize(root_dn));
+                 Slapi_Entry *e = 0;
+ 
+                 /* root might be locked too! :) */
+-                ret = slapi_search_internal_get_entry(
+-                    edn, 0,
+-                    &e,
+-                    (void *)plugin_get_default_component_id()
+-
+-                        );
+-
++                ret = slapi_search_get_entry(&entry_pb, edn, 0, &e, (void *)plugin_get_default_component_id());
+                 if (0 == ret && e) {
+                     ret = slapi_check_account_lock(
+                         0, /* pb not req */
+@@ -1955,7 +1950,7 @@ slapd_bind_local_user(Connection *conn)
+             root_map_free:
+                 /* root_dn consumed by bind creds set */
+                 slapi_sdn_free(&edn);
+-                slapi_entry_free(e);
++                slapi_search_get_entry_done(&entry_pb);
+                 ret = 0;
+             }
+         }
+diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
+index bbc0ab71a..259bedfff 100644
+--- a/ldap/servers/slapd/modify.c
++++ b/ldap/servers/slapd/modify.c
+@@ -592,6 +592,7 @@ modify_internal_pb(Slapi_PBlock *pb)
+ static void
+ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
+ {
++    Slapi_PBlock *entry_pb = NULL;
+     Slapi_Backend *be = NULL;
+     Slapi_Entry *pse;
+     Slapi_Entry *referral;
+@@ -723,7 +724,7 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
+      * 2. If yes, then if the mods contain any passwdpolicy specific attributes.
+      * 3. If yes, then it invokes corrosponding checking function.
+      */
+-    if (!repl_op && !internal_op && normdn && (e = get_entry(pb, normdn))) {
++    if (!repl_op && !internal_op && normdn && slapi_search_get_entry(&entry_pb, sdn, NULL, &e, NULL) == LDAP_SUCCESS) {
+         Slapi_Value target;
+         slapi_value_init(&target);
+         slapi_value_set_string(&target, "passwordpolicy");
+@@ -1072,7 +1073,7 @@ free_and_return : {
+     slapi_entry_free(epre);
+     slapi_entry_free(epost);
+ }
+-    slapi_entry_free(e);
++    slapi_search_get_entry_done(&entry_pb);
+ 
+     if (be)
+         slapi_be_Unlock(be);
+@@ -1202,12 +1203,13 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
+     if (!internal_op) {
+         /* slapi_acl_check_mods needs an array of LDAPMods, but
+          * we're really only interested in the one password mod. */
++        Slapi_PBlock *entry_pb = NULL;
+         LDAPMod *mods[2];
+         mods[0] = mod;
+         mods[1] = NULL;
+ 
+         /* We need to actually fetch the target here to use for ACI checking. */
+-        slapi_search_internal_get_entry(&sdn, NULL, &e, (void *)plugin_get_default_component_id());
++        slapi_search_get_entry(&entry_pb, &sdn, NULL, &e, NULL);
+ 
+         /* Create a bogus entry with just the target dn if we were unable to
+          * find the actual entry.  This will only be used for checking the ACIs. */
+@@ -1238,9 +1240,12 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
+             }
+             send_ldap_result(pb, res, NULL, errtxt, 0, NULL);
+             slapi_ch_free_string(&errtxt);
++            slapi_search_get_entry_done(&entry_pb);
+             rc = -1;
+             goto done;
+         }
++        /* done with slapi entry e */
++        slapi_search_get_entry_done(&entry_pb);
+ 
+         /*
+          * If this mod is being performed by a password administrator/rootDN,
+@@ -1353,7 +1358,6 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
+     valuearray_free(&values);
+ 
+ done:
+-    slapi_entry_free(e);
+     slapi_sdn_done(&sdn);
+     slapi_ch_free_string(&proxydn);
+     slapi_ch_free_string(&proxystr);
+diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
+index 9da266b61..a140e7988 100644
+--- a/ldap/servers/slapd/plugin_internal_op.c
++++ b/ldap/servers/slapd/plugin_internal_op.c
+@@ -882,3 +882,51 @@ slapi_search_internal_get_entry(Slapi_DN *dn, char **attrs, Slapi_Entry **ret_en
+     int_search_pb = NULL;
+     return rc;
+ }
++
++int32_t
++slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity)
++{
++    Slapi_Entry **entries = NULL;
++    int32_t rc = 0;
++    void *component = component_identity;
++
++    if (ret_entry) {
++        *ret_entry = NULL;
++    }
++
++    if (component == NULL) {
++        component = (void *)plugin_get_default_component_id();
++    }
++
++    if (*pb == NULL) {
++        *pb = slapi_pblock_new();
++    }
++    slapi_search_internal_set_pb(*pb, slapi_sdn_get_dn(dn), LDAP_SCOPE_BASE,
++        "(|(objectclass=*)(objectclass=ldapsubentry))",
++        attrs, 0, NULL, NULL, component, 0 );
++    slapi_search_internal_pb(*pb);
++    slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
++    if (LDAP_SUCCESS == rc) {
++        slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
++        if (NULL != entries && NULL != entries[0]) {
++            /* Only need to dup the entry if the caller passed ret_entry in. */
++            if (ret_entry) {
++                *ret_entry = entries[0];
++            }
++        } else {
++            rc = LDAP_NO_SUCH_OBJECT;
++        }
++    }
++
++    return rc;
++}
++
++void
++slapi_search_get_entry_done(Slapi_PBlock **pb)
++{
++    if (pb && *pb) {
++        slapi_free_search_results_internal(*pb);
++        slapi_pblock_destroy(*pb);
++        *pb = NULL;
++    }
++}
+diff --git a/ldap/servers/slapd/resourcelimit.c b/ldap/servers/slapd/resourcelimit.c
+index 705344c84..9c2619716 100644
+--- a/ldap/servers/slapd/resourcelimit.c
++++ b/ldap/servers/slapd/resourcelimit.c
+@@ -305,22 +305,17 @@ reslimit_get_ext(Slapi_Connection *conn, const char *logname, SLAPIResLimitConnD
+ int
+ reslimit_update_from_dn(Slapi_Connection *conn, Slapi_DN *dn)
+ {
+-    Slapi_Entry *e;
++    Slapi_PBlock *pb = NULL;
++    Slapi_Entry *e = NULL;
+     int rc;
+ 
+-    e = NULL;
+     if (dn != NULL) {
+-
+         char **attrs = reslimit_get_registered_attributes();
+-        (void)slapi_search_internal_get_entry(dn, attrs, &e, reslimit_componentid);
++        slapi_search_get_entry(&pb, dn, attrs, &e, reslimit_componentid);
+         charray_free(attrs);
+     }
+-
+     rc = reslimit_update_from_entry(conn, e);
+-
+-    if (NULL != e) {
+-        slapi_entry_free(e);
+-    }
++    slapi_search_get_entry_done(&pb);
+ 
+     return (rc);
+ }
+diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
+index d44b03b0e..bf7e59f75 100644
+--- a/ldap/servers/slapd/schema.c
++++ b/ldap/servers/slapd/schema.c
+@@ -341,6 +341,7 @@ schema_policy_add_action(Slapi_Entry *entry, char *attrName, schema_item_t **lis
+ static void
+ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
+ {
++    Slapi_PBlock *pb = NULL;
+     Slapi_DN sdn;
+     Slapi_Entry *entry = NULL;
+     schema_item_t *schema_item, *next;
+@@ -369,8 +370,7 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
+ 
+     /* Load the replication policy of the schema  */
+     slapi_sdn_init_dn_byref(&sdn, dn);
+-    if (slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
+-
++    if (slapi_search_get_entry(&pb, &sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
+         /* fill the policies (accept/reject) regarding objectclass */
+         schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_ACCEPT, &replica->objectclasses);
+         schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_REJECT, &replica->objectclasses);
+@@ -378,9 +378,8 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
+         /* fill the policies (accept/reject) regarding attribute */
+         schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_ACCEPT, &replica->attributes);
+         schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_REJECT, &replica->attributes);
+-
+-        slapi_entry_free(entry);
+     }
++    slapi_search_get_entry_done(&pb);
+     slapi_sdn_done(&sdn);
+ }
+ 
+diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
+index 0e3857068..be1e52e4d 100644
+--- a/ldap/servers/slapd/slapi-plugin.h
++++ b/ldap/servers/slapd/slapi-plugin.h
+@@ -5972,7 +5972,7 @@ void slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *ibase, int type, char *at
+ 
+ /*
+  * slapi_search_internal_get_entry() finds an entry given a dn.  It returns
+- * an LDAP error code (LDAP_SUCCESS if all goes well).
++ * an LDAP error code (LDAP_SUCCESS if all goes well).  Caller must free ret_entry
+  */
+ int slapi_search_internal_get_entry(Slapi_DN *dn, char **attrlist, Slapi_Entry **ret_entry, void *caller_identity);
+ 
+@@ -8296,6 +8296,27 @@ uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder);
+ /* helper function */
+ const char * slapi_fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val);
+ 
++/**
++ * Get a Slapi_Entry via an internal search.  The caller then needs to call
++ * slapi_search_get_entry_done() to free any resources allocated to get the entry.
++ *
++ * \param pb - Slapi_PBlock pointer (the function will allocate one if necessary)
++ * \param dn - Slapi_DN of the entry to retrieve
++ * \param attrs - NULL-terminated list of attributes to return
++ * \param ret_entry - pointer to a Slapi_Entry where the returned entry is stored
++ * \param component_identity - plugin component
++ *
++ * \return - ldap result code
++ */
++int32_t slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity);
++
++/**
++ * Free the resources allocated by slapi_search_get_entry()
++ *
++ * \param pb - slapi_pblock pointer
++ */
++void slapi_search_get_entry_done(Slapi_PBlock **pb);
++
+ #ifdef __cplusplus
+ }
+ #endif
+-- 
+2.26.2
+
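For reference, a minimal sketch of how a caller uses the new pair introduced
by this patch, slapi_search_get_entry() and slapi_search_get_entry_done(),
when the entry is only read and never consumed. The signatures come from the
plugin_internal_op.c / slapi-plugin.h hunks above; the helper name, plugin
identity and attribute are placeholders.

#include "slapi-plugin.h"

/* Read-only lookup: no extra slapi_entry_dup(), no slapi_entry_free(). */
static int32_t
example_read_entry(Slapi_DN *sdn, void *plugin_id)
{
    Slapi_PBlock *entry_pb = NULL;
    Slapi_Entry *e = NULL;
    int32_t rc = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_id);

    if (rc == LDAP_SUCCESS && e) {
        /* "e" points into the search results held by entry_pb, so it is
         * read from but never freed or kept past the _done() call. */
        char *val = slapi_entry_attr_get_charptr(e, "cn");
        /* ... use val ... */
        slapi_ch_free_string(&val);
    }

    /* Frees the internal search results and the pblock in one call,
     * replacing the old slapi_entry_free(e) pattern. */
    slapi_search_get_entry_done(&entry_pb);
    return rc;
}
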
diff --git a/SOURCES/0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch b/SOURCES/0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch
new file mode 100644
index 0000000..f3d3571
--- /dev/null
+++ b/SOURCES/0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch
@@ -0,0 +1,96 @@
+From 9710c327b3034d7a9d112306961c9cec98083df5 Mon Sep 17 00:00:00 2001
+From: Simon Pichugin <simon.pichugin@gmail.com>
+Date: Mon, 18 May 2020 22:33:45 +0200
+Subject: [PATCH 05/12] Issue 51086 - Improve dscreate instance name validation
+
+Bug Description: When creating an instance using dscreate, the maximum name
+length is not enforced. The ldapi socket name contains the name of the
+instance; if it is too long, we can hit system limits and the file name will
+be truncated. dscreate also does not sanitize the instance name, so it is
+possible to create an instance with non-ascii symbols in its name.
+
+Fix Description: Add more checks to the 'dscreate from-file' installation.
+Add a limit on the nsslapd-ldapifilepath string length, because it is bounded
+by the sizeof((*ports_info.i_listenaddr)->local.path) buffer it is copied into.
+
+https://pagure.io/389-ds-base/issue/51086
+
+Reviewed by: firstyear, mreynolds (Thanks!)
+---
+ ldap/servers/slapd/libglobs.c       | 12 ++++++++++++
+ src/cockpit/389-console/src/ds.jsx  |  8 ++++++--
+ src/lib389/lib389/instance/setup.py |  9 +++++++++
+ 3 files changed, 27 insertions(+), 2 deletions(-)
+
+diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
+index 0d3d9a924..fbf90d92d 100644
+--- a/ldap/servers/slapd/libglobs.c
++++ b/ldap/servers/slapd/libglobs.c
+@@ -2390,11 +2390,23 @@ config_set_ldapi_filename(const char *attrname, char *value, char *errorbuf, int
+ {
+     int retVal = LDAP_SUCCESS;
+     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
++    /*
++     * The LDAPI file path length is limited by sizeof((*ports_info.i_listenaddr)->local.path),
++     * which is set in main.c inside the "#if defined(ENABLE_LDAPI)" block.
++     * ports_info.i_listenaddr is sizeof(PRNetAddr) and our required size is 8 bytes less.
++     */
++    size_t result_size = sizeof(PRNetAddr) - 8;
+ 
+     if (config_value_is_null(attrname, value, errorbuf, 0)) {
+         return LDAP_OPERATIONS_ERROR;
+     }
+ 
++    if (strlen(value) >= result_size) {
++        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: \"%s\" is invalid, its length must be less than %zu",
++                              attrname, value, result_size);
++        return LDAP_OPERATIONS_ERROR;
++    }
++
+     if (apply) {
+         CFG_LOCK_WRITE(slapdFrontendConfig);
+ 
+diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx
+index 90d9e5abd..53aa5cb79 100644
+--- a/src/cockpit/389-console/src/ds.jsx
++++ b/src/cockpit/389-console/src/ds.jsx
+@@ -793,10 +793,14 @@ class CreateInstanceModal extends React.Component {
+             return;
+         }
+         newServerId = newServerId.replace(/^slapd-/i, ""); // strip "slapd-"
+-        if (newServerId.length > 128) {
++        if (newServerId === "admin") {
++            addNotification("warning", "Instance Name 'admin' is reserved, please choose a different name");
++            return;
++        }
++        if (newServerId.length > 80) {
+             addNotification(
+                 "warning",
+-                "Instance name is too long, it must not exceed 128 characters"
++                "Instance name is too long, it must not exceed 80 characters"
+             );
+             return;
+         }
+diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
+index 803992275..f5fc5495d 100644
+--- a/src/lib389/lib389/instance/setup.py
++++ b/src/lib389/lib389/instance/setup.py
+@@ -567,6 +567,15 @@ class SetupDs(object):
+ 
+         # We need to know the prefix before we can do the instance checks
+         assert_c(slapd['instance_name'] is not None, "Configuration instance_name in section [slapd] not found")
++        assert_c(len(slapd['instance_name']) <= 80, "Server identifier should not be longer than 80 symbols")
++        assert_c(all(ord(c) < 128 for c in slapd['instance_name']), "Server identifier can not contain non ascii characters")
++        assert_c(' ' not in slapd['instance_name'], "Server identifier can not contain a space")
++        assert_c(slapd['instance_name'] != 'admin', "Server identifier \"admin\" is reserved, please choose a different identifier")
++
++        # Check that valid characters are used
++        safe = re.compile(r'^[#%:\w@_-]+$').search
++        assert_c(bool(safe(slapd['instance_name'])), "Server identifier has invalid characters, please choose a different value")
++
+         # Check if the instance exists or not.
+         # Should I move this import? I think this prevents some recursion
+         from lib389 import DirSrv
+-- 
+2.26.2
+
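To make the new server-side limit concrete, here is a small standalone sketch
of the length check that the libglobs.c hunk adds for nsslapd-ldapifilepath.
The helper and main() are illustrative only; the "sizeof(PRNetAddr) - 8"
bound is taken from the patch, and PRNetAddr is assumed to come from NSPR's
prio.h.

#include <stdio.h>
#include <string.h>
#include <prio.h> /* NSPR: PRNetAddr, the buffer the LDAPI socket path is copied into */

/* Illustrative mirror of the check added to config_set_ldapi_filename(). */
static int
ldapi_path_length_ok(const char *value)
{
    /* The socket path lands in (*ports_info.i_listenaddr)->local.path,
     * so it must be shorter than sizeof(PRNetAddr) minus the 8 bytes
     * taken by the non-path members. */
    size_t max_size = sizeof(PRNetAddr) - 8;
    return strlen(value) < max_size;
}

int
main(void)
{
    printf("ok: %d\n", ldapi_path_length_ok("/var/run/slapd-example.socket"));
    return 0;
}
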
diff --git a/SOURCES/0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch b/SOURCES/0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch
new file mode 100644
index 0000000..5bb0635
--- /dev/null
+++ b/SOURCES/0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch
@@ -0,0 +1,254 @@
+From c0cb15445c1434b3d317b1c06ab1a0ba8dbc6f04 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Tue, 19 May 2020 15:11:53 -0400
+Subject: [PATCH 06/12] Issue 51102 - RFE - ds-replcheck - make online timeout
+ configurable
+
+Bug Description:  When doing an online check with replicas that are very
+                  far apart, the connection can time out because the
+                  hardcoded timeout is 5 seconds.
+
+Fix Description:  Change the default so the connection never times out, and
+                  add a CLI option to specify a specific timeout.
+
+                  Also caught all the possible LDAP exceptions so we can
+                  cleanly "fail".  Fixed some python syntax issues, and
+                  improved the entry inconsistency report.
+
+relates: https://pagure.io/389-ds-base/issue/51102
+
+Reviewed by: firstyear & spichugi(Thanks!)
+---
+ ldap/admin/src/scripts/ds-replcheck | 90 ++++++++++++++++++-----------
+ 1 file changed, 57 insertions(+), 33 deletions(-)
+
+diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
+index 30bcfd65d..5bb7dfce3 100755
+--- a/ldap/admin/src/scripts/ds-replcheck
++++ b/ldap/admin/src/scripts/ds-replcheck
+@@ -1,7 +1,7 @@
+ #!/usr/bin/python3
+ 
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2018 Red Hat, Inc.
++# Copyright (C) 2020 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -21,10 +21,9 @@ import getpass
+ import signal
+ from ldif import LDIFRecordList
+ from ldap.ldapobject import SimpleLDAPObject
+-from ldap.cidict import cidict
+ from ldap.controls import SimplePagedResultsControl
+ from lib389._entry import Entry
+-from lib389.utils import ensure_str, ensure_list_str, ensure_int
++from lib389.utils import ensure_list_str, ensure_int
+ 
+ VERSION = "2.0"
+ RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
+@@ -185,11 +184,11 @@ def report_conflict(entry, attr, opts):
+     report = True
+ 
+     if 'nscpentrywsi' in entry.data:
+-        found = False
+         for val in entry.data['nscpentrywsi']:
+             if val.lower().startswith(attr + ';'):
+                 if (opts['starttime'] - extract_time(val)) <= opts['lag']:
+                     report = False
++                    break
+ 
+     return report
+ 
+@@ -321,6 +320,9 @@ def ldif_search(LDIF, dn):
+     count = 0
+     ignore_list = ['conflictcsn', 'modifytimestamp', 'modifiersname']
+     val = ""
++    attr = ""
++    state_attr = ""
++    part_dn = ""
+     result['entry'] = None
+     result['conflict'] = None
+     result['tombstone'] = False
+@@ -570,6 +572,7 @@ def cmp_entry(mentry, rentry, opts):
+                         if val.lower().startswith(mattr + ';'):
+                             if not found:
+                                 diff['diff'].append("      Master:")
++                            diff['diff'].append("        - Value:      %s" % (val.split(':')[1].lstrip()))
+                             diff['diff'].append("        - State Info: %s" % (val))
+                             diff['diff'].append("        - Date:       %s\n" % (time.ctime(extract_time(val))))
+                             found = True
+@@ -588,6 +591,7 @@ def cmp_entry(mentry, rentry, opts):
+                         if val.lower().startswith(mattr + ';'):
+                             if not found:
+                                 diff['diff'].append("      Replica:")
++                            diff['diff'].append("        - Value:      %s" % (val.split(':')[1].lstrip()))
+                             diff['diff'].append("        - State Info: %s" % (val))
+                             diff['diff'].append("        - Date:       %s\n" % (time.ctime(extract_time(val))))
+                             found = True
+@@ -654,7 +658,6 @@ def do_offline_report(opts, output_file=None):
+     rconflicts = []
+     rtombstones = 0
+     mtombstones = 0
+-    idx = 0
+ 
+     # Open LDIF files
+     try:
+@@ -926,7 +929,7 @@ def validate_suffix(ldapnode, suffix, hostname):
+     :return - True if suffix exists, otherwise False
+     """
+     try:
+-        master_basesuffix = ldapnode.search_s(suffix, ldap.SCOPE_BASE )
++        ldapnode.search_s(suffix, ldap.SCOPE_BASE)
+     except ldap.NO_SUCH_OBJECT:
+         print("Error: Failed to validate suffix in {}. {} does not exist.".format(hostname, suffix))
+         return False
+@@ -968,12 +971,12 @@ def connect_to_replicas(opts):
+     replica = SimpleLDAPObject(ruri)
+ 
+     # Set timeouts
+-    master.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
+-    master.set_option(ldap.OPT_TIMEOUT,5.0)
+-    replica.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
+-    replica.set_option(ldap.OPT_TIMEOUT,5.0)
++    master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
++    master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
++    replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
++    replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
+ 
+-    # Setup Secure Conenction
++    # Setup Secure Connection
+     if opts['certdir'] is not None:
+         # Setup Master
+         if opts['mprotocol'] != LDAPI:
+@@ -1003,7 +1006,7 @@ def connect_to_replicas(opts):
+     try:
+         master.simple_bind_s(opts['binddn'], opts['bindpw'])
+     except ldap.SERVER_DOWN as e:
+-        print("Cannot connect to %r" % muri)
++        print(f"Cannot connect to {muri} ({str(e)})")
+         sys.exit(1)
+     except ldap.LDAPError as e:
+         print("Error: Failed to authenticate to Master: ({}).  "
+@@ -1014,7 +1017,7 @@ def connect_to_replicas(opts):
+     try:
+         replica.simple_bind_s(opts['binddn'], opts['bindpw'])
+     except ldap.SERVER_DOWN as e:
+-        print("Cannot connect to %r" % ruri)
++        print(f"Cannot connect to {ruri} ({str(e)})")
+         sys.exit(1)
+     except ldap.LDAPError as e:
+         print("Error: Failed to authenticate to Replica: ({}).  "
+@@ -1218,7 +1221,6 @@ def do_online_report(opts, output_file=None):
+     """
+     m_done = False
+     r_done = False
+-    done = False
+     report = {}
+     report['diff'] = []
+     report['m_missing'] = []
+@@ -1257,15 +1259,22 @@ def do_online_report(opts, output_file=None):
+ 
+     # Read the results and start comparing
+     while not m_done or not r_done:
+-        if not m_done:
+-            m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
+-        elif not r_done:
+-            m_rdata = []
+-
+-        if not r_done:
+-            r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
+-        elif not m_done:
+-            r_rdata = []
++        try:
++            if not m_done:
++                m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
++            elif not r_done:
++                m_rdata = []
++        except ldap.LDAPError as e:
++            print("Error: Problem getting the results from the master: %s", str(e))
++            sys.exit(1)
++        try:
++            if not r_done:
++                r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
++            elif not m_done:
++                r_rdata = []
++        except ldap.LDAPError as e:
++            print("Error: Problem getting the results from the replica: %s", str(e))
++            sys.exit(1)
+ 
+         # Convert entries
+         mresult = convert_entries(m_rdata)
+@@ -1291,11 +1300,15 @@ def do_online_report(opts, output_file=None):
+                 ]
+             if m_pctrls:
+                 if m_pctrls[0].cookie:
+-                    # Copy cookie from response control to request control
+-                    req_pr_ctrl.cookie = m_pctrls[0].cookie
+-                    master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+-                        "(|(objectclass=*)(objectclass=ldapsubentry))",
+-                        ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
++                    try:
++                        # Copy cookie from response control to request control
++                        req_pr_ctrl.cookie = m_pctrls[0].cookie
++                        master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
++                            "(|(objectclass=*)(objectclass=ldapsubentry))",
++                            ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
++                    except ldap.LDAPError as e:
++                        print("Error: Problem searching the master: %s", str(e))
++                        sys.exit(1)
+                 else:
+                     m_done = True  # No more pages available
+             else:
+@@ -1311,11 +1324,15 @@ def do_online_report(opts, output_file=None):
+ 
+             if r_pctrls:
+                 if r_pctrls[0].cookie:
+-                    # Copy cookie from response control to request control
+-                    req_pr_ctrl.cookie = r_pctrls[0].cookie
+-                    replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+-                        "(|(objectclass=*)(objectclass=ldapsubentry))",
+-                        ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
++                    try:
++                        # Copy cookie from response control to request control
++                        req_pr_ctrl.cookie = r_pctrls[0].cookie
++                        replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
++                            "(|(objectclass=*)(objectclass=ldapsubentry))",
++                            ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
++                    except ldap.LDAPError as e:
++                        print("Error: Problem searching the replica: %s", str(e))
++                        sys.exit(1)
+                 else:
+                     r_done = True  # No more pages available
+             else:
+@@ -1426,6 +1443,9 @@ def init_online_params(args):
+         # prompt for password
+         opts['bindpw'] = getpass.getpass('Enter password: ')
+ 
++    # lastly handle the timeout
++    opts['timeout'] = int(args.timeout)
++
+     return opts
+ 
+ 
+@@ -1553,6 +1573,8 @@ def main():
+     state_parser.add_argument('-y', '--pass-file', help='A text file containing the clear text password for the bind dn', dest='pass_file', default=None)
+     state_parser.add_argument('-Z', '--cert-dir', help='The certificate database directory for secure connections',
+                               dest='certdir', default=None)
++    state_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections.  Default is no timeout.',
++                              type=int, dest='timeout', default=-1)
+ 
+     # Online mode
+     online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
+@@ -1577,6 +1599,8 @@ def main():
+     online_parser.add_argument('-p', '--page-size', help='The paged-search result grouping size (default 500 entries)',
+                                dest='pagesize', default=500)
+     online_parser.add_argument('-o', '--out-file', help='The output file', dest='file', default=None)
++    online_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections.  Default is no timeout.',
++                               type=int, dest='timeout', default=-1)
+ 
+     # Offline LDIF mode
+     offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
+-- 
+2.26.2
+
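The timeout and paging changes above combine into one pattern: configure both LDAP timeouts from the new option (-1 meaning "never time out") and keep re-issuing the paged search until the response control returns no cookie. The following condensed python-ldap sketch is illustrative only (paged_search and its defaults are not part of ds-replcheck):

import ldap
from ldap.controls import SimplePagedResultsControl

def paged_search(uri, suffix, timeout=-1, page_size=500):
    conn = ldap.initialize(uri)
    # Mirror the new --timeout handling: -1 disables the timeout.
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
    conn.set_option(ldap.OPT_TIMEOUT, timeout)
    req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
    entries = []
    msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, '(objectclass=*)',
                            serverctrls=[req_ctrl])
    while True:
        try:
            rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
        except ldap.LDAPError as e:
            raise SystemExit("Error: problem getting the results: %s" % e)
        entries.extend(rdata)
        pctrls = [c for c in rctrls
                  if c.controlType == SimplePagedResultsControl.controlType]
        if pctrls and pctrls[0].cookie:
            # Copy the cookie from the response control into the request
            # control and ask for the next page.
            req_ctrl.cookie = pctrls[0].cookie
            msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE,
                                    '(objectclass=*)',
                                    serverctrls=[req_ctrl])
        else:
            return entries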
diff --git a/SOURCES/0007-Issue-51110-Fix-ASAN-ODR-warnings.patch b/SOURCES/0007-Issue-51110-Fix-ASAN-ODR-warnings.patch
new file mode 100644
index 0000000..df8423c
--- /dev/null
+++ b/SOURCES/0007-Issue-51110-Fix-ASAN-ODR-warnings.patch
@@ -0,0 +1,428 @@
+From a1cd3cf8e8b6b33ab21d5338921187a76dd9dcd0 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 22 May 2020 15:41:45 -0400
+Subject: [PATCH 07/12] Issue 51110 - Fix ASAN ODR warnings
+
+Description: Fixed ODR issues with global attributes that were duplicated from
+             the core server into the replication and retrocl plugins.
+
+relates: https://pagure.io/389-ds-base/issue/51110
+
+Reviewed by: firstyear(Thanks!)
+---
+ ldap/servers/plugins/replication/repl5.h      | 17 +++---
+ .../plugins/replication/repl_globals.c        | 17 +++---
+ ldap/servers/plugins/replication/replutil.c   | 16 +++---
+ ldap/servers/plugins/retrocl/retrocl.h        | 22 ++++----
+ ldap/servers/plugins/retrocl/retrocl_cn.c     | 12 ++---
+ ldap/servers/plugins/retrocl/retrocl_po.c     | 52 +++++++++----------
+ ldap/servers/plugins/retrocl/retrocl_trim.c   | 30 +++++------
+ 7 files changed, 82 insertions(+), 84 deletions(-)
+
+diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
+index 873dd8a16..72b7089e3 100644
+--- a/ldap/servers/plugins/replication/repl5.h
++++ b/ldap/servers/plugins/replication/repl5.h
+@@ -280,15 +280,14 @@ struct berval *NSDS90StartReplicationRequest_new(const char *protocol_oid,
+ int multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb);
+ 
+ /* From repl_globals.c */
+-extern char *attr_changenumber;
+-extern char *attr_targetdn;
+-extern char *attr_changetype;
+-extern char *attr_newrdn;
+-extern char *attr_deleteoldrdn;
+-extern char *attr_changes;
+-extern char *attr_newsuperior;
+-extern char *attr_changetime;
+-extern char *attr_dataversion;
++extern char *repl_changenumber;
++extern char *repl_targetdn;
++extern char *repl_changetype;
++extern char *repl_newrdn;
++extern char *repl_deleteoldrdn;
++extern char *repl_changes;
++extern char *repl_newsuperior;
++extern char *repl_changetime;
+ extern char *attr_csn;
+ extern char *changetype_add;
+ extern char *changetype_delete;
+diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
+index 355a0ffa1..c615c77da 100644
+--- a/ldap/servers/plugins/replication/repl_globals.c
++++ b/ldap/servers/plugins/replication/repl_globals.c
+@@ -48,15 +48,14 @@ char *changetype_delete = CHANGETYPE_DELETE;
+ char *changetype_modify = CHANGETYPE_MODIFY;
+ char *changetype_modrdn = CHANGETYPE_MODRDN;
+ char *changetype_moddn = CHANGETYPE_MODDN;
+-char *attr_changenumber = ATTR_CHANGENUMBER;
+-char *attr_targetdn = ATTR_TARGETDN;
+-char *attr_changetype = ATTR_CHANGETYPE;
+-char *attr_newrdn = ATTR_NEWRDN;
+-char *attr_deleteoldrdn = ATTR_DELETEOLDRDN;
+-char *attr_changes = ATTR_CHANGES;
+-char *attr_newsuperior = ATTR_NEWSUPERIOR;
+-char *attr_changetime = ATTR_CHANGETIME;
+-char *attr_dataversion = ATTR_DATAVERSION;
++char *repl_changenumber = ATTR_CHANGENUMBER;
++char *repl_targetdn = ATTR_TARGETDN;
++char *repl_changetype = ATTR_CHANGETYPE;
++char *repl_newrdn = ATTR_NEWRDN;
++char *repl_deleteoldrdn = ATTR_DELETEOLDRDN;
++char *repl_changes = ATTR_CHANGES;
++char *repl_newsuperior = ATTR_NEWSUPERIOR;
++char *repl_changetime = ATTR_CHANGETIME;
+ char *attr_csn = ATTR_CSN;
+ char *type_copyingFrom = TYPE_COPYINGFROM;
+ char *type_copiedFrom = TYPE_COPIEDFROM;
+diff --git a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c
+index de1e77880..39f821d12 100644
+--- a/ldap/servers/plugins/replication/replutil.c
++++ b/ldap/servers/plugins/replication/replutil.c
+@@ -64,14 +64,14 @@ get_cleattrs()
+ {
+     if (cleattrs[0] == NULL) {
+         cleattrs[0] = type_objectclass;
+-        cleattrs[1] = attr_changenumber;
+-        cleattrs[2] = attr_targetdn;
+-        cleattrs[3] = attr_changetype;
+-        cleattrs[4] = attr_newrdn;
+-        cleattrs[5] = attr_deleteoldrdn;
+-        cleattrs[6] = attr_changes;
+-        cleattrs[7] = attr_newsuperior;
+-        cleattrs[8] = attr_changetime;
++        cleattrs[1] = repl_changenumber;
++        cleattrs[2] = repl_targetdn;
++        cleattrs[3] = repl_changetype;
++        cleattrs[4] = repl_newrdn;
++        cleattrs[5] = repl_deleteoldrdn;
++        cleattrs[6] = repl_changes;
++        cleattrs[7] = repl_newsuperior;
++        cleattrs[8] = repl_changetime;
+         cleattrs[9] = NULL;
+     }
+     return cleattrs;
+diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h
+index 06482a14c..2ce76fcec 100644
+--- a/ldap/servers/plugins/retrocl/retrocl.h
++++ b/ldap/servers/plugins/retrocl/retrocl.h
+@@ -94,17 +94,17 @@ extern int retrocl_nattributes;
+ extern char **retrocl_attributes;
+ extern char **retrocl_aliases;
+ 
+-extern const char *attr_changenumber;
+-extern const char *attr_targetdn;
+-extern const char *attr_changetype;
+-extern const char *attr_newrdn;
+-extern const char *attr_newsuperior;
+-extern const char *attr_deleteoldrdn;
+-extern const char *attr_changes;
+-extern const char *attr_changetime;
+-extern const char *attr_objectclass;
+-extern const char *attr_nsuniqueid;
+-extern const char *attr_isreplicated;
++extern const char *retrocl_changenumber;
++extern const char *retrocl_targetdn;
++extern const char *retrocl_changetype;
++extern const char *retrocl_newrdn;
++extern const char *retrocl_newsuperior;
++extern const char *retrocl_deleteoldrdn;
++extern const char *retrocl_changes;
++extern const char *retrocl_changetime;
++extern const char *retrocl_objectclass;
++extern const char *retrocl_nsuniqueid;
++extern const char *retrocl_isreplicated;
+ 
+ extern PRLock *retrocl_internal_lock;
+ extern Slapi_RWLock *retrocl_cn_lock;
+diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c
+index 709d7a857..5fc5f586d 100644
+--- a/ldap/servers/plugins/retrocl/retrocl_cn.c
++++ b/ldap/servers/plugins/retrocl/retrocl_cn.c
+@@ -62,7 +62,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
+         Slapi_Attr *chattr = NULL;
+         sval = NULL;
+         value = NULL;
+-        if (slapi_entry_attr_find(e, attr_changenumber, &chattr) == 0) {
++        if (slapi_entry_attr_find(e, retrocl_changenumber, &chattr) == 0) {
+             slapi_attr_first_value(chattr, &sval);
+             if (NULL != sval) {
+                 value = slapi_value_get_berval(sval);
+@@ -79,7 +79,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
+         chattr = NULL;
+         sval = NULL;
+         value = NULL;
+-        if (slapi_entry_attr_find(e, attr_changetime, &chattr) == 0) {
++        if (slapi_entry_attr_find(e, retrocl_changetime, &chattr) == 0) {
+             slapi_attr_first_value(chattr, &sval);
+             if (NULL != sval) {
+                 value = slapi_value_get_berval(sval);
+@@ -134,7 +134,7 @@ retrocl_get_changenumbers(void)
+     cr.cr_time = 0;
+ 
+     slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_FIRST,
+-                       (char *)attr_changenumber, /* cast away const */
++                       (char *)retrocl_changenumber, /* cast away const */
+                        NULL, NULL, 0, &cr, NULL, handle_cnum_result,
+                        handle_cnum_entry, NULL);
+ 
+@@ -144,7 +144,7 @@ retrocl_get_changenumbers(void)
+     slapi_ch_free((void **)&cr.cr_time);
+ 
+     slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
+-                       (char *)attr_changenumber, /* cast away const */
++                       (char *)retrocl_changenumber, /* cast away const */
+                        NULL, NULL, 0, &cr, NULL, handle_cnum_result,
+                        handle_cnum_entry, NULL);
+ 
+@@ -185,7 +185,7 @@ retrocl_getchangetime(int type, int *err)
+         return NO_TIME;
+     }
+     slapi_seq_callback(RETROCL_CHANGELOG_DN, type,
+-                       (char *)attr_changenumber, /* cast away const */
++                       (char *)retrocl_changenumber, /* cast away const */
+                        NULL,
+                        NULL, 0, &cr, NULL,
+                        handle_cnum_result, handle_cnum_entry, NULL);
+@@ -353,7 +353,7 @@ retrocl_update_lastchangenumber(void)
+     cr.cr_cnum = 0;
+     cr.cr_time = 0;
+     slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
+-                       (char *)attr_changenumber, /* cast away const */
++                       (char *)retrocl_changenumber, /* cast away const */
+                        NULL, NULL, 0, &cr, NULL, handle_cnum_result,
+                        handle_cnum_entry, NULL);
+ 
+diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c
+index d2af79b31..e1488f56b 100644
+--- a/ldap/servers/plugins/retrocl/retrocl_po.c
++++ b/ldap/servers/plugins/retrocl/retrocl_po.c
+@@ -25,17 +25,17 @@ modrdn2reple(Slapi_Entry *e, const char *newrdn, int deloldrdn, LDAPMod **ldm, c
+ 
+ /******************************/
+ 
+-const char *attr_changenumber = "changenumber";
+-const char *attr_targetdn = "targetdn";
+-const char *attr_changetype = "changetype";
+-const char *attr_newrdn = "newrdn";
+-const char *attr_deleteoldrdn = "deleteoldrdn";
+-const char *attr_changes = "changes";
+-const char *attr_newsuperior = "newsuperior";
+-const char *attr_changetime = "changetime";
+-const char *attr_objectclass = "objectclass";
+-const char *attr_nsuniqueid = "nsuniqueid";
+-const char *attr_isreplicated = "isreplicated";
++const char *retrocl_changenumber = "changenumber";
++const char *retrocl_targetdn = "targetdn";
++const char *retrocl_changetype = "changetype";
++const char *retrocl_newrdn = "newrdn";
++const char *retrocl_deleteoldrdn = "deleteoldrdn";
++const char *retrocl_changes = "changes";
++const char *retrocl_newsuperior = "newsuperior";
++const char *retrocl_changetime = "changetime";
++const char *retrocl_objectclass = "objectclass";
++const char *retrocl_nsuniqueid = "nsuniqueid";
++const char *retrocl_isreplicated = "isreplicated";
+ 
+ /*
+  * Function: make_changes_string
+@@ -185,7 +185,7 @@ write_replog_db(
+                   changenum, dn);
+ 
+     /* Construct the dn of this change record */
+-    edn = slapi_ch_smprintf("%s=%lu,%s", attr_changenumber, changenum, RETROCL_CHANGELOG_DN);
++    edn = slapi_ch_smprintf("%s=%lu,%s", retrocl_changenumber, changenum, RETROCL_CHANGELOG_DN);
+ 
+     /*
+      * Create the entry struct, and fill in fields common to all types
+@@ -214,7 +214,7 @@ write_replog_db(
+             attributeAlias = attributeName;
+         }
+ 
+-        if (strcasecmp(attributeName, attr_nsuniqueid) == 0) {
++        if (strcasecmp(attributeName, retrocl_nsuniqueid) == 0) {
+             Slapi_Entry *entry = NULL;
+             const char *uniqueId = NULL;
+ 
+@@ -236,7 +236,7 @@ write_replog_db(
+ 
+             extensibleObject = 1;
+ 
+-        } else if (strcasecmp(attributeName, attr_isreplicated) == 0) {
++        } else if (strcasecmp(attributeName, retrocl_isreplicated) == 0) {
+             int isReplicated = 0;
+             char *attributeValue = NULL;
+ 
+@@ -298,17 +298,17 @@ write_replog_db(
+     sprintf(chnobuf, "%lu", changenum);
+     val.bv_val = chnobuf;
+     val.bv_len = strlen(chnobuf);
+-    slapi_entry_add_values(e, attr_changenumber, vals);
++    slapi_entry_add_values(e, retrocl_changenumber, vals);
+ 
+     /* Set the targetentrydn attribute */
+     val.bv_val = dn;
+     val.bv_len = strlen(dn);
+-    slapi_entry_add_values(e, attr_targetdn, vals);
++    slapi_entry_add_values(e, retrocl_targetdn, vals);
+ 
+     /* Set the changeTime attribute */
+     val.bv_val = format_genTime(curtime);
+     val.bv_len = strlen(val.bv_val);
+-    slapi_entry_add_values(e, attr_changetime, vals);
++    slapi_entry_add_values(e, retrocl_changetime, vals);
+     slapi_ch_free((void **)&val.bv_val);
+ 
+     /*
+@@ -344,7 +344,7 @@ write_replog_db(
+             /* Set the changetype attribute */
+             val.bv_val = "delete";
+             val.bv_len = 6;
+-            slapi_entry_add_values(e, attr_changetype, vals);
++            slapi_entry_add_values(e, retrocl_changetype, vals);
+         }
+         break;
+ 
+@@ -422,7 +422,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
+     } else {
+         return (1);
+     }
+-    slapi_entry_add_values(e, attr_changetype, vals);
++    slapi_entry_add_values(e, retrocl_changetype, vals);
+ 
+     estr = slapi_entry2str(oe, &len);
+     p = estr;
+@@ -435,7 +435,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
+     }
+     val.bv_val = p;
+     val.bv_len = len - (p - estr); /* length + terminating \0 */
+-    slapi_entry_add_values(e, attr_changes, vals);
++    slapi_entry_add_values(e, retrocl_changes, vals);
+     slapi_ch_free_string(&estr);
+     return 0;
+ }
+@@ -471,7 +471,7 @@ mods2reple(Slapi_Entry *e, LDAPMod **ldm)
+         if (NULL != l) {
+             val.bv_val = l->ls_buf;
+             val.bv_len = l->ls_len + 1; /* string + terminating \0 */
+-            slapi_entry_add_values(e, attr_changes, vals);
++            slapi_entry_add_values(e, retrocl_changes, vals);
+             lenstr_free(&l);
+         }
+     }
+@@ -511,12 +511,12 @@ modrdn2reple(
+ 
+     val.bv_val = "modrdn";
+     val.bv_len = 6;
+-    slapi_entry_add_values(e, attr_changetype, vals);
++    slapi_entry_add_values(e, retrocl_changetype, vals);
+ 
+     if (newrdn) {
+         val.bv_val = (char *)newrdn; /* cast away const */
+         val.bv_len = strlen(newrdn);
+-        slapi_entry_add_values(e, attr_newrdn, vals);
++        slapi_entry_add_values(e, retrocl_newrdn, vals);
+     }
+ 
+     if (deloldrdn == 0) {
+@@ -526,12 +526,12 @@ modrdn2reple(
+         val.bv_val = "TRUE";
+         val.bv_len = 4;
+     }
+-    slapi_entry_add_values(e, attr_deleteoldrdn, vals);
++    slapi_entry_add_values(e, retrocl_deleteoldrdn, vals);
+ 
+     if (newsuperior) {
+         val.bv_val = (char *)newsuperior; /* cast away const */
+         val.bv_len = strlen(newsuperior);
+-        slapi_entry_add_values(e, attr_newsuperior, vals);
++        slapi_entry_add_values(e, retrocl_newsuperior, vals);
+     }
+ 
+     if (NULL != ldm) {
+@@ -540,7 +540,7 @@ modrdn2reple(
+             if (l->ls_len) {
+                 val.bv_val = l->ls_buf;
+                 val.bv_len = l->ls_len;
+-                slapi_entry_add_values(e, attr_changes, vals);
++                slapi_entry_add_values(e, retrocl_changes, vals);
+             }
+             lenstr_free(&l);
+         }
+diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c
+index 0378eb7f6..d031dc3f8 100644
+--- a/ldap/servers/plugins/retrocl/retrocl_trim.c
++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c
+@@ -49,15 +49,15 @@ static const char **
+ get_cleattrs(void)
+ {
+     if (cleattrs[0] == NULL) {
+-        cleattrs[0] = attr_objectclass;
+-        cleattrs[1] = attr_changenumber;
+-        cleattrs[2] = attr_targetdn;
+-        cleattrs[3] = attr_changetype;
+-        cleattrs[4] = attr_newrdn;
+-        cleattrs[5] = attr_deleteoldrdn;
+-        cleattrs[6] = attr_changes;
+-        cleattrs[7] = attr_newsuperior;
+-        cleattrs[8] = attr_changetime;
++        cleattrs[0] = retrocl_objectclass;
++        cleattrs[1] = retrocl_changenumber;
++        cleattrs[2] = retrocl_targetdn;
++        cleattrs[3] = retrocl_changetype;
++        cleattrs[4] = retrocl_newrdn;
++        cleattrs[5] = retrocl_deleteoldrdn;
++        cleattrs[6] = retrocl_changes;
++        cleattrs[7] = retrocl_newsuperior;
++        cleattrs[8] = retrocl_changetime;
+         cleattrs[9] = NULL;
+     }
+     return cleattrs;
+@@ -81,7 +81,7 @@ delete_changerecord(changeNumber cnum)
+     char *dnbuf;
+     int delrc;
+ 
+-    dnbuf = slapi_ch_smprintf("%s=%ld, %s", attr_changenumber, cnum,
++    dnbuf = slapi_ch_smprintf("%s=%ld, %s", retrocl_changenumber, cnum,
+                               RETROCL_CHANGELOG_DN);
+     pb = slapi_pblock_new();
+     slapi_delete_internal_set_pb(pb, dnbuf, NULL /*controls*/, NULL /* uniqueid */,
+@@ -154,7 +154,7 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
+         if (NULL != e) {
+             Slapi_Value *sval = NULL;
+             const struct berval *val = NULL;
+-            rc = slapi_entry_attr_find(e, attr_changetime, &attr);
++            rc = slapi_entry_attr_find(e, retrocl_changetime, &attr);
+             /* Bug 624442: Logic checking for lack of timestamp was
+                reversed. */
+             if (0 != rc || slapi_attr_first_value(attr, &sval) == -1 ||
+@@ -174,14 +174,14 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
+ /*
+  * Function: get_changetime
+  * Arguments: cnum - number of change record to retrieve
+- * Returns: Taking the attr_changetime of the 'cnum' entry,
++ * Returns: Taking the retrocl_changetime of the 'cnum' entry,
+  * it converts it into time_t (parse_localTime) and returns this time value.
+  * It returns 0 in the following cases:
+- *  - changerecord entry has not attr_changetime
++ *  - changerecord entry has not retrocl_changetime
+  *  - attr_changetime attribute has no value
+  *  - attr_changetime attribute value is empty
+  *
+- * Description: Retrieve attr_changetime ("changetime") from a changerecord whose number is "cnum".
++ * Description: Retrieve retrocl_changetime ("changetime") from a changerecord whose number is "cnum".
+  */
+ static time_t
+ get_changetime(changeNumber cnum, int *err)
+@@ -198,7 +198,7 @@ get_changetime(changeNumber cnum, int *err)
+     }
+     crtp->crt_nentries = crtp->crt_err = 0;
+     crtp->crt_time = 0;
+-    PR_snprintf(fstr, sizeof(fstr), "%s=%ld", attr_changenumber, cnum);
++    PR_snprintf(fstr, sizeof(fstr), "%s=%ld", retrocl_changenumber, cnum);
+ 
+     pb = slapi_pblock_new();
+     slapi_search_internal_set_pb(pb, RETROCL_CHANGELOG_DN,
+-- 
+2.26.2
+
diff --git a/SOURCES/0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch b/SOURCES/0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch
new file mode 100644
index 0000000..a85a4bb
--- /dev/null
+++ b/SOURCES/0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch
@@ -0,0 +1,466 @@
+From 8d14ff153e9335b09739438344f9c3c78a496548 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 22 May 2020 10:42:11 -0400
+Subject: [PATCH 08/12] Issue 51095 - abort operation if CSN can not be
+ generated
+
+Bug Description:  If we failed to get the system time, we were using an
+                  uninitialized timespec struct, which could lead to bizarre
+                  times in CSNs.
+
+Fix description:  Check if the system time function fails, and if it does
+                  then abort the update operation.
+
+relates: https://pagure.io/389-ds-base/issue/51095
+
+Reviewed by: firstyear & tbordaz(Thanks!!)
+---
+ ldap/servers/plugins/replication/repl5.h      |  2 +-
+ .../plugins/replication/repl5_replica.c       | 33 ++++++++------
+ ldap/servers/slapd/back-ldbm/ldbm_add.c       |  8 +++-
+ ldap/servers/slapd/back-ldbm/ldbm_delete.c    |  9 +++-
+ ldap/servers/slapd/back-ldbm/ldbm_modify.c    | 10 ++++-
+ ldap/servers/slapd/back-ldbm/ldbm_modrdn.c    |  8 +++-
+ ldap/servers/slapd/csngen.c                   | 18 +++++++-
+ ldap/servers/slapd/entrywsi.c                 | 15 ++++---
+ ldap/servers/slapd/slap.h                     |  2 +-
+ ldap/servers/slapd/slapi-plugin.h             |  8 ++++
+ ldap/servers/slapd/slapi-private.h            |  5 ++-
+ ldap/servers/slapd/time.c                     | 43 +++++++++++++------
+ 12 files changed, 118 insertions(+), 43 deletions(-)
+
+diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
+index 72b7089e3..638471744 100644
+--- a/ldap/servers/plugins/replication/repl5.h
++++ b/ldap/servers/plugins/replication/repl5.h
+@@ -776,7 +776,7 @@ void replica_disable_replication(Replica *r);
+ int replica_start_agreement(Replica *r, Repl_Agmt *ra);
+ int windows_replica_start_agreement(Replica *r, Repl_Agmt *ra);
+ 
+-CSN *replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn);
++int32_t replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
+ int replica_get_attr(Slapi_PBlock *pb, const char *type, void *value);
+ 
+ /* mapping tree extensions manipulation */
+diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
+index 02caa88d9..f01782330 100644
+--- a/ldap/servers/plugins/replication/repl5_replica.c
++++ b/ldap/servers/plugins/replication/repl5_replica.c
+@@ -3931,11 +3931,9 @@ windows_replica_start_agreement(Replica *r, Repl_Agmt *ra)
+  * A callback function registered as op->o_csngen_handler and
+  * called by backend ops to generate opcsn.
+  */
+-CSN *
+-replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
++int32_t
++replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn)
+ {
+-    CSN *opcsn = NULL;
+-
+     Replica *replica = replica_get_replica_for_op(pb);
+     if (NULL != replica) {
+         Slapi_Operation *op;
+@@ -3946,17 +3944,26 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
+                 CSNGen *gen = (CSNGen *)object_get_data(gen_obj);
+                 if (NULL != gen) {
+                     /* The new CSN should be greater than the base CSN */
+-                    csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
+-                    if (csn_compare(opcsn, basecsn) <= 0) {
+-                        char opcsnstr[CSN_STRSIZE], basecsnstr[CSN_STRSIZE];
++                    if (csngen_new_csn(gen, opcsn, PR_FALSE /* don't notify */) != CSN_SUCCESS) {
++                        /* Failed to generate CSN we must abort */
++                        object_release(gen_obj);
++                        return -1;
++                    }
++                    if (csn_compare(*opcsn, basecsn) <= 0) {
++                        char opcsnstr[CSN_STRSIZE];
++                        char basecsnstr[CSN_STRSIZE];
+                         char opcsn2str[CSN_STRSIZE];
+ 
+-                        csn_as_string(opcsn, PR_FALSE, opcsnstr);
++                        csn_as_string(*opcsn, PR_FALSE, opcsnstr);
+                         csn_as_string(basecsn, PR_FALSE, basecsnstr);
+-                        csn_free(&opcsn);
++                        csn_free(opcsn);
+                         csngen_adjust_time(gen, basecsn);
+-                        csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
+-                        csn_as_string(opcsn, PR_FALSE, opcsn2str);
++                        if (csngen_new_csn(gen, opcsn, PR_FALSE) != CSN_SUCCESS) {
++                            /* Failed to generate CSN we must abort */
++                            object_release(gen_obj);
++                            return -1;
++                        }
++                        csn_as_string(*opcsn, PR_FALSE, opcsn2str);
+                         slapi_log_err(SLAPI_LOG_WARNING, repl_plugin_name,
+                                       "replica_generate_next_csn - "
+                                       "opcsn=%s <= basecsn=%s, adjusted opcsn=%s\n",
+@@ -3966,14 +3973,14 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
+                      * Insert opcsn into the csn pending list.
+                      * This is the notify effect in csngen_new_csn().
+                      */
+-                    assign_csn_callback(opcsn, (void *)replica);
++                    assign_csn_callback(*opcsn, (void *)replica);
+                 }
+                 object_release(gen_obj);
+             }
+         }
+     }
+ 
+-    return opcsn;
++    return 0;
+ }
+ 
+ /*
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
+index d0d88bf16..ee366c74c 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
+@@ -645,7 +645,13 @@ ldbm_back_add(Slapi_PBlock *pb)
+                          * Current op is a user request. Opcsn will be assigned
+                          * if the dn is in an updatable replica.
+                          */
+-                        opcsn = entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL);
++                        if (entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
++                            slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_add",
++                                    "failed to generate add CSN for entry (%s), aborting operation\n",
++                                    slapi_entry_get_dn(e));
++                            ldap_result_code = LDAP_OPERATIONS_ERROR;
++                            goto error_return;
++                        }
+                     }
+                     if (opcsn != NULL) {
+                         entry_set_csn(e, opcsn);
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+index 873b5b00e..fbcb57310 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+@@ -464,7 +464,14 @@ replace_entry:
+                      * by entry_assign_operation_csn() if the dn is in an
+                      * updatable replica.
+                      */
+-                    opcsn = entry_assign_operation_csn ( pb, e->ep_entry, NULL );
++                    if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
++                        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_delete",
++                                "failed to generate delete CSN for entry (%s), aborting operation\n",
++                                slapi_entry_get_dn(e->ep_entry));
++                        retval = -1;
++                        ldap_result_code = LDAP_OPERATIONS_ERROR;
++                        goto error_return;
++                    }
+                 }
+                 if (opcsn != NULL) {
+                     if (!is_fixup_operation) {
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+index b0c477e3f..e9d7e87e3 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+@@ -598,12 +598,18 @@ ldbm_back_modify(Slapi_PBlock *pb)
+                     goto error_return;
+                 }
+                 opcsn = operation_get_csn(operation);
+-                if (NULL == opcsn && operation->o_csngen_handler) {
++                if (opcsn == NULL && operation->o_csngen_handler) {
+                     /*
+                      * Current op is a user request. Opcsn will be assigned
+                      * if the dn is in an updatable replica.
+                      */
+-                    opcsn = entry_assign_operation_csn(pb, e->ep_entry, NULL);
++                    if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
++                        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
++                                "failed to generate modify CSN for entry (%s), aborting operation\n",
++                                slapi_entry_get_dn(e->ep_entry));
++                        ldap_result_code = LDAP_OPERATIONS_ERROR;
++                        goto error_return;
++                    }
+                 }
+                 if (opcsn) {
+                     entry_set_maxcsn(e->ep_entry, opcsn);
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+index 26698012a..fde83c99f 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+@@ -543,7 +543,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
+                      * Current op is a user request. Opcsn will be assigned
+                      * if the dn is in an updatable replica.
+                      */
+-                    opcsn = entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL);
++                    if (entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
++                        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modrdn",
++                                "failed to generate modrdn CSN for entry (%s), aborting operation\n",
++                                slapi_entry_get_dn(e->ep_entry));
++                        ldap_result_code = LDAP_OPERATIONS_ERROR;
++                        goto error_return;
++                    }
+                 }
+                 if (opcsn != NULL) {
+                     entry_set_maxcsn(e->ep_entry, opcsn);
+diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
+index 68dbbda8e..b08d8b25c 100644
+--- a/ldap/servers/slapd/csngen.c
++++ b/ldap/servers/slapd/csngen.c
+@@ -164,6 +164,7 @@ csngen_free(CSNGen **gen)
+ int
+ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
+ {
++    struct timespec now = {0};
+     int rc = CSN_SUCCESS;
+     time_t cur_time;
+     int delta;
+@@ -179,12 +180,25 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
+         return CSN_MEMORY_ERROR;
+     }
+ 
+-    slapi_rwlock_wrlock(gen->lock);
++    if ((rc = slapi_clock_gettime(&now)) != 0) {
++        /* Failed to get system time, we must abort */
++        slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
++                "Failed to get system time (%s)\n",
++                slapd_system_strerror(rc));
++        return CSN_TIME_ERROR;
++    }
++    cur_time = now.tv_sec;
+ 
+-    cur_time = slapi_current_utc_time();
++    slapi_rwlock_wrlock(gen->lock);
+ 
+     /* check if the time should be adjusted */
+     delta = cur_time - gen->state.sampled_time;
++    if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
++        /* We had a jump larger than a day */
++        slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
++                "Detected large jump in CSN time.  Delta: %d (current time: %ld  vs  previous time: %ld)\n",
++                delta, cur_time, gen->state.sampled_time);
++    }
+     if (delta > 0) {
+         rc = _csngen_adjust_local_time(gen, cur_time);
+         if (rc != CSN_SUCCESS) {
+diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
+index 5d1d7238a..31bf65d8e 100644
+--- a/ldap/servers/slapd/entrywsi.c
++++ b/ldap/servers/slapd/entrywsi.c
+@@ -224,13 +224,12 @@ entry_add_rdn_csn(Slapi_Entry *e, const CSN *csn)
+     slapi_rdn_free(&rdn);
+ }
+ 
+-CSN *
+-entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry)
++int32_t
++entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn)
+ {
+     Slapi_Operation *op;
+     const CSN *basecsn = NULL;
+     const CSN *parententry_dncsn = NULL;
+-    CSN *opcsn = NULL;
+ 
+     slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ 
+@@ -252,14 +251,16 @@ entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parent
+                 basecsn = parententry_dncsn;
+             }
+         }
+-        opcsn = op->o_csngen_handler(pb, basecsn);
++        if(op->o_csngen_handler(pb, basecsn, opcsn) != 0) {
++            return -1;
++        }
+ 
+-        if (NULL != opcsn) {
+-            operation_set_csn(op, opcsn);
++        if (*opcsn) {
++            operation_set_csn(op, *opcsn);
+         }
+     }
+ 
+-    return opcsn;
++    return 0;
+ }
+ 
+ /*
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index a4cae784a..cef8c789c 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -1480,7 +1480,7 @@ struct op;
+ typedef void (*result_handler)(struct conn *, struct op *, int, char *, char *, int, struct berval **);
+ typedef int (*search_entry_handler)(Slapi_Backend *, struct conn *, struct op *, struct slapi_entry *);
+ typedef int (*search_referral_handler)(Slapi_Backend *, struct conn *, struct op *, struct berval **);
+-typedef CSN *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn);
++typedef int32_t *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
+ typedef int (*replica_attr_handler)(Slapi_PBlock *pb, const char *type, void **value);
+ 
+ /*
+diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
+index be1e52e4d..834a98742 100644
+--- a/ldap/servers/slapd/slapi-plugin.h
++++ b/ldap/servers/slapd/slapi-plugin.h
+@@ -6743,6 +6743,14 @@ int slapi_reslimit_get_integer_limit(Slapi_Connection *conn, int handle, int *li
+  */
+ time_t slapi_current_time(void) __attribute__((deprecated));
+ 
++/**
++ * Get the system time and check for errors.
++ *
++ * \param tp - a timespec struct where the system time is set
++ * \return result code, upon success tp is set to the system time
++ */
++int32_t slapi_clock_gettime(struct timespec *tp);
++
+ /**
+  * Returns the current system time as a hr clock relative to uptime
+  * This means the clock is not affected by timezones
+diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
+index d85ee43e5..c98c1947c 100644
+--- a/ldap/servers/slapd/slapi-private.h
++++ b/ldap/servers/slapd/slapi-private.h
+@@ -233,7 +233,8 @@ enum
+     CSN_INVALID_PARAMETER, /* invalid function argument */
+     CSN_INVALID_FORMAT,    /* invalid state format */
+     CSN_LDAP_ERROR,        /* LDAP operation failed */
+-    CSN_NSPR_ERROR         /* NSPR API failure */
++    CSN_NSPR_ERROR,        /* NSPR API failure */
++    CSN_TIME_ERROR         /* Error generating new CSN due to clock failure */
+ };
+ 
+ typedef struct csngen CSNGen;
+@@ -326,7 +327,7 @@ int slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **new_entries, int
+ void set_attr_to_protected_list(char *attr, int flag);
+ 
+ /* entrywsi.c */
+-CSN *entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry);
++int32_t entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn);
+ const CSN *entry_get_maxcsn(const Slapi_Entry *entry);
+ void entry_set_maxcsn(Slapi_Entry *entry, const CSN *csn);
+ const CSN *entry_get_dncsn(const Slapi_Entry *entry);
+diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
+index 8048a3359..545538404 100644
+--- a/ldap/servers/slapd/time.c
++++ b/ldap/servers/slapd/time.c
+@@ -61,6 +61,25 @@ poll_current_time()
+     return 0;
+ }
+ 
++/*
++ * Check if the time function returns an error.  If so return the errno
++ */
++int32_t
++slapi_clock_gettime(struct timespec *tp)
++{
++    int32_t rc = 0;
++
++    PR_ASSERT(tp && tp->tv_nsec == 0 && tp->tv_sec == 0);
++
++    if (clock_gettime(CLOCK_REALTIME, tp) != 0) {
++        rc = errno;
++    }
++
++    PR_ASSERT(rc == 0);
++
++    return rc;
++}
++
+ time_t
+ current_time(void)
+ {
+@@ -69,7 +88,7 @@ current_time(void)
+      * but this should be removed in favour of the
+      * more accurately named slapi_current_utc_time
+      */
+-    struct timespec now;
++    struct timespec now = {0};
+     clock_gettime(CLOCK_REALTIME, &now);
+     return now.tv_sec;
+ }
+@@ -83,7 +102,7 @@ slapi_current_time(void)
+ struct timespec
+ slapi_current_rel_time_hr(void)
+ {
+-    struct timespec now;
++    struct timespec now = {0};
+     clock_gettime(CLOCK_MONOTONIC, &now);
+     return now;
+ }
+@@ -91,7 +110,7 @@ slapi_current_rel_time_hr(void)
+ struct timespec
+ slapi_current_utc_time_hr(void)
+ {
+-    struct timespec ltnow;
++    struct timespec ltnow = {0};
+     clock_gettime(CLOCK_REALTIME, &ltnow);
+     return ltnow;
+ }
+@@ -99,7 +118,7 @@ slapi_current_utc_time_hr(void)
+ time_t
+ slapi_current_utc_time(void)
+ {
+-    struct timespec ltnow;
++    struct timespec ltnow = {0};
+     clock_gettime(CLOCK_REALTIME, &ltnow);
+     return ltnow.tv_sec;
+ }
+@@ -108,8 +127,8 @@ void
+ slapi_timestamp_utc_hr(char *buf, size_t bufsize)
+ {
+     PR_ASSERT(bufsize >= SLAPI_TIMESTAMP_BUFSIZE);
+-    struct timespec ltnow;
+-    struct tm utctm;
++    struct timespec ltnow = {0};
++    struct tm utctm = {0};
+     clock_gettime(CLOCK_REALTIME, &ltnow);
+     gmtime_r(&(ltnow.tv_sec), &utctm);
+     strftime(buf, bufsize, "%Y%m%d%H%M%SZ", &utctm);
+@@ -140,7 +159,7 @@ format_localTime_log(time_t t, int initsize __attribute__((unused)), char *buf,
+ {
+ 
+     long tz;
+-    struct tm *tmsp, tms;
++    struct tm *tmsp, tms = {0};
+     char tbuf[*bufsize];
+     char sign;
+     /* make sure our buffer will be big enough. Need at least 29 */
+@@ -191,7 +210,7 @@ format_localTime_hr_log(time_t t, long nsec, int initsize __attribute__((unused)
+ {
+ 
+     long tz;
+-    struct tm *tmsp, tms;
++    struct tm *tmsp, tms = {0};
+     char tbuf[*bufsize];
+     char sign;
+     /* make sure our buffer will be big enough. Need at least 39 */
+@@ -278,7 +297,7 @@ slapi_timespec_expire_check(struct timespec *expire)
+     if (expire->tv_sec == 0 && expire->tv_nsec == 0) {
+         return TIMER_CONTINUE;
+     }
+-    struct timespec now;
++    struct timespec now = {0};
+     clock_gettime(CLOCK_MONOTONIC, &now);
+     if (now.tv_sec > expire->tv_sec ||
+         (expire->tv_sec == now.tv_sec && now.tv_sec > expire->tv_nsec)) {
+@@ -293,7 +312,7 @@ format_localTime(time_t from)
+        in the syntax of a generalizedTime, except without the time zone. */
+ {
+     char *into;
+-    struct tm t;
++    struct tm t = {0};
+ 
+     localtime_r(&from, &t);
+ 
+@@ -362,7 +381,7 @@ format_genTime(time_t from)
+        in the syntax of a generalizedTime. */
+ {
+     char *into;
+-    struct tm t;
++    struct tm t = {0};
+ 
+     gmtime_r(&from, &t);
+     into = slapi_ch_malloc(SLAPI_TIMESTAMP_BUFSIZE);
+@@ -382,7 +401,7 @@ time_t
+ read_genTime(struct berval *from)
+ {
+     struct tm t = {0};
+-    time_t retTime;
++    time_t retTime = {0};
+     time_t diffsec = 0;
+     int i, gflag = 0, havesec = 0;
+ 
+-- 
+2.26.2
+
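The core of the fix above is "never build a CSN from a clock value you did not actually get". As a purely illustrative analogue (the server code is C; the Python names below are hypothetical and assume a POSIX system), the same pattern looks like this:

import time

def get_wall_clock():
    """Analogue of slapi_clock_gettime(): return (rc, seconds)."""
    try:
        return 0, time.clock_gettime(time.CLOCK_REALTIME)
    except OSError as e:
        # Surface the errno instead of silently using a garbage timestamp.
        return e.errno or -1, None

def csn_timestamp_or_abort():
    rc, now = get_wall_clock()
    if rc != 0:
        # Mirrors csngen_new_csn() returning CSN_TIME_ERROR and the
        # back ends aborting the update with LDAP_OPERATIONS_ERROR.
        raise RuntimeError("cannot generate CSN, clock unavailable (rc=%s)" % rc)
    return int(now)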
diff --git a/SOURCES/0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch b/SOURCES/0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch
new file mode 100644
index 0000000..e5dbb3d
--- /dev/null
+++ b/SOURCES/0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch
@@ -0,0 +1,179 @@
+From 52ce524f7672563b543e84401665765cfa72dea5 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Tue, 26 May 2020 17:03:11 -0400
+Subject: [PATCH 09/12] Issue 51113 - Allow using uid for replication manager
+ entry
+
+Bug Description:  The replication manager entry was hardcoded to only
+                  allow "cn" as the RDN attribute.
+
+Fix description:  Allow setting the rdn attribute of the replication
+                  manager DS ldap object, and include the schema that
+                  allows "uid".
+
+relates:  https://pagure.io/389-ds-base/issue/51113
+
+Reviewed by: spichugi & firstyear(Thanks!!)
+---
+ src/lib389/lib389/cli_conf/replication.py | 53 ++++++++++++-----------
+ src/lib389/lib389/replica.py              | 11 +++--
+ 2 files changed, 35 insertions(+), 29 deletions(-)
+
+diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
+index 09cb9b435..b9bc3d291 100644
+--- a/src/lib389/lib389/cli_conf/replication.py
++++ b/src/lib389/lib389/cli_conf/replication.py
+@@ -199,19 +199,21 @@ def enable_replication(inst, basedn, log, args):
+ 
+     # Create replication manager if password was provided
+     if args.bind_dn and args.bind_passwd:
+-        cn_rdn = args.bind_dn.split(",", 1)[0]
+-        cn_val = cn_rdn.split("=", 1)[1]
+-        manager = BootstrapReplicationManager(inst, dn=args.bind_dn)
++        rdn = args.bind_dn.split(",", 1)[0]
++        rdn_attr, rdn_val = rdn.split("=", 1)
++        manager = BootstrapReplicationManager(inst, dn=args.bind_dn, rdn_attr=rdn_attr)
+         try:
+             manager.create(properties={
+-                'cn': cn_val,
++                'cn': rdn_val,
++                'uid': rdn_val,
+                 'userPassword': args.bind_passwd
+             })
+         except ldap.ALREADY_EXISTS:
+             # Already there, but could have different password.  Delete and recreate
+             manager.delete()
+             manager.create(properties={
+-                'cn': cn_val,
++                'cn': rdn_val,
++                'uid': rdn_val,
+                 'userPassword': args.bind_passwd
+             })
+         except ldap.NO_SUCH_OBJECT:
+@@ -511,22 +513,23 @@ def get_cl(inst, basedn, log, args):
+ 
+ 
+ def create_repl_manager(inst, basedn, log, args):
+-    manager_cn = "replication manager"
++    manager_name = "replication manager"
+     repl_manager_password = ""
+     repl_manager_password_confirm = ""
+ 
+     if args.name:
+-        manager_cn = args.name
+-
+-    if is_a_dn(manager_cn):
+-        # A full DN was provided, make sure it uses "cn" for the RDN
+-        if manager_cn.split("=", 1)[0].lower() != "cn":
+-            raise ValueError("Replication manager DN must use \"cn\" for the rdn attribute")
+-        manager_dn = manager_cn
+-        manager_rdn = manager_dn.split(",", 1)[0]
+-        manager_cn = manager_rdn.split("=", 1)[1]
++        manager_name = args.name
++
++    if is_a_dn(manager_name):
++        # A full DN was provided
++        manager_dn = manager_name
++        manager_rdn = manager_name.split(",", 1)[0]
++        manager_attr, manager_name = manager_rdn.split("=", 1)
++        if manager_attr.lower() not in ['cn', 'uid']:
++            raise ValueError(f'The RDN attribute "{manager_attr}" is not allowed, you must use "cn" or "uid"')
+     else:
+-        manager_dn = "cn={},cn=config".format(manager_cn)
++        manager_dn = "cn={},cn=config".format(manager_name)
++        manager_attr = "cn"
+ 
+     if args.passwd:
+         repl_manager_password = args.passwd
+@@ -544,10 +547,11 @@ def create_repl_manager(inst, basedn, log, args):
+                 repl_manager_password = ""
+                 repl_manager_password_confirm = ""
+ 
+-    manager = BootstrapReplicationManager(inst, dn=manager_dn)
++    manager = BootstrapReplicationManager(inst, dn=manager_dn, rdn_attr=manager_attr)
+     try:
+         manager.create(properties={
+-            'cn': manager_cn,
++            'cn': manager_name,
++            'uid': manager_name,
+             'userPassword': repl_manager_password
+         })
+         if args.suffix:
+@@ -564,7 +568,8 @@ def create_repl_manager(inst, basedn, log, args):
+         # Already there, but could have different password.  Delete and recreate
+         manager.delete()
+         manager.create(properties={
+-            'cn': manager_cn,
++            'cn': manager_name,
++            'uid': manager_name,
+             'userPassword': repl_manager_password
+         })
+         if args.suffix:
+@@ -954,6 +959,7 @@ def get_winsync_agmt_status(inst, basedn, log, args):
+     status = agmt.status(winsync=True, use_json=args.json)
+     log.info(status)
+ 
++
+ #
+ # Tasks
+ #
+@@ -1347,8 +1353,7 @@ def create_parser(subparsers):
+     agmt_set_parser.add_argument('--wait-async-results', help="The amount of time in milliseconds the server waits if "
+                                                               "the consumer is not ready before resending data")
+     agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
+-                                                          "a consumer sends back a busy response before making another "
+-                                                          "attempt to acquire access.")
++                                 "a consumer sends back a busy response before making another attempt to acquire access.")
+     agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
+     agmt_set_parser.add_argument('--flow-control-window', help="Sets the maximum number of entries and updates sent by a supplier, which are not acknowledged by the consumer.")
+     agmt_set_parser.add_argument('--flow-control-pause', help="The time in milliseconds to pause after reaching the number of entries and updates set in \"--flow-control-window\"")
+@@ -1438,8 +1443,7 @@ def create_parser(subparsers):
+     winsync_agmt_add_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
+     winsync_agmt_add_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
+     winsync_agmt_add_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
+-                                                          "a consumer sends back a busy response before making another "
+-                                                          "attempt to acquire access.")
++                                         "a consumer sends back a busy response before making another attempt to acquire access.")
+     winsync_agmt_add_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
+     winsync_agmt_add_parser.add_argument('--init', action='store_true', default=False, help="Initialize the agreement after creating it.")
+ 
+@@ -1468,8 +1472,7 @@ def create_parser(subparsers):
+     winsync_agmt_set_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
+     winsync_agmt_set_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
+     winsync_agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
+-                                                          "a consumer sends back a busy response before making another "
+-                                                          "attempt to acquire access.")
++                                         "a consumer sends back a busy response before making another attempt to acquire access.")
+     winsync_agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
+ 
+     # Get
+diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
+index e3fc7fe1f..f8adb3ce2 100644
+--- a/src/lib389/lib389/replica.py
++++ b/src/lib389/lib389/replica.py
+@@ -1779,15 +1779,18 @@ class BootstrapReplicationManager(DSLdapObject):
+     :type instance: lib389.DirSrv
+     :param dn: The dn to create
+     :type dn: str
++    :param rdn_attr: The attribute to use for the RDN
++    :type rdn_attr: str
+     """
+-    def __init__(self, instance, dn='cn=replication manager,cn=config'):
++    def __init__(self, instance, dn='cn=replication manager,cn=config', rdn_attr='cn'):
+         super(BootstrapReplicationManager, self).__init__(instance, dn)
+-        self._rdn_attribute = 'cn'
++        self._rdn_attribute = rdn_attr
+         self._must_attributes = ['cn', 'userPassword']
+         self._create_objectclasses = [
+             'top',
+-            'netscapeServer',
+-            'nsAccount'
++            'inetUser',  # for uid
++            'netscapeServer',  # for cn
++            'nsAccount',  # for authentication attributes
+             ]
+         if ds_is_older('1.4.0'):
+             self._create_objectclasses.remove('nsAccount')
+-- 
+2.26.2
+
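+
+An illustrative sketch (here `inst` stands for a connected lib389 DirSrv
+instance and the DN is only an example) of how the new rdn_attr parameter
+allows a uid-based bootstrap replication manager entry:
+
+    from lib389.replica import BootstrapReplicationManager
+    mgr = BootstrapReplicationManager(inst, dn='uid=replication manager,cn=config', rdn_attr='uid')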
diff --git a/SOURCES/0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch b/SOURCES/0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch
new file mode 100644
index 0000000..966627a
--- /dev/null
+++ b/SOURCES/0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch
@@ -0,0 +1,34 @@
+From ec85e986ec5710682de883f0f40f539b2f9945fa Mon Sep 17 00:00:00 2001
+From: Viktor Ashirov <vashirov@redhat.com>
+Date: Wed, 27 May 2020 15:22:18 +0200
+Subject: [PATCH 10/12] Issue 50931 - RFE AD filter rewriter for ObjectCategory
+
+Bug Description:
+ASAN build fails on RHEL due to linking issues
+
+Fix Description:
+Add the missing libslapd.la dependency to librewriters.la
+
+Relates: https://pagure.io/389-ds-base/issue/50931
+
+Reviewed by: tbordaz (Thanks!)
+---
+ Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 2309f3010..0e5f04f91 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -1159,7 +1159,7 @@ librewriters_la_SOURCES = \
+ 
+ librewriters_la_LDFLAGS = $(AM_LDFLAGS)
+ librewriters_la_CPPFLAGS = $(AM_CPPFLAGS) $(REWRITERS_INCLUDES) $(DSPLUGIN_CPPFLAGS)
+-librewriters_la_LIBADD = $(NSS_LINK) $(NSPR_LINK)
++librewriters_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK)
+ 
+ #------------------------
+ # libsvrcore
+-- 
+2.26.2
+
diff --git a/SOURCES/0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch b/SOURCES/0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch
new file mode 100644
index 0000000..c63a63c
--- /dev/null
+++ b/SOURCES/0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch
@@ -0,0 +1,1218 @@
+From 21ed5224d63e3118a39ddd5ea438367532541a8f Mon Sep 17 00:00:00 2001
+From: Matus Honek <mhonek@redhat.com>
+Date: Mon, 2 Dec 2019 14:53:31 +0100
+Subject: [PATCH 11/12] Issue 50746 - Add option to healthcheck to list all the
+ lint reports
+
+Bug Description:
+Healthcheck lacks a way to find out what checks are available.
+
+Fix Description:
+Add dsctl healthcheck options to list the available checks and the known
+error codes, and the ability to run checks selectively. The checks are
+hierarchically structured and in some cases can be matched by patterns
+(using an asterisk).
+
+Fixes https://pagure.io/389-ds-base/issue/50746
+
+Author: Matus Honek <mhonek@redhat.com>
+
+Reviewed by: Mark, William, Simon (thanks for the patience!)
+
+(cherry picked from commit 4a55322c7bdb0b9ff57428ad0dc2e4d943572a69)
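+
+An illustrative usage sketch (YOUR_INSTANCE is a placeholder and the check
+specs are only examples; use --list-checks to see what a given instance
+actually provides):
+
+    # dsctl YOUR_INSTANCE healthcheck --list-checks
+    # dsctl YOUR_INSTANCE healthcheck --list-errors
+    # dsctl YOUR_INSTANCE healthcheck --dry-run --check backends:*
+    # dsctl YOUR_INSTANCE healthcheck --check config:hr_timestamp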
+---
+ src/lib389/cli/dsctl                          |   1 +
+ src/lib389/lib389/_mapped_object.py           |  34 +---
+ src/lib389/lib389/_mapped_object_lint.py      | 157 ++++++++++++++++++
+ src/lib389/lib389/backend.py                  |  13 +-
+ src/lib389/lib389/cli_ctl/health.py           | 116 +++++++++----
+ src/lib389/lib389/config.py                   |  13 +-
+ src/lib389/lib389/dseldif.py                  |  29 +---
+ src/lib389/lib389/encrypted_attributes.py     |   1 -
+ src/lib389/lib389/index.py                    |   3 -
+ src/lib389/lib389/lint.py                     | 125 ++++++++------
+ src/lib389/lib389/monitor.py                  |   5 +-
+ src/lib389/lib389/nss_ssl.py                  |  23 ++-
+ src/lib389/lib389/plugins.py                  |   5 +-
+ src/lib389/lib389/replica.py                  |  10 +-
+ .../lib389/tests/mapped_object_lint_test.py   |  78 +++++++++
+ 15 files changed, 448 insertions(+), 165 deletions(-)
+ create mode 100644 src/lib389/lib389/_mapped_object_lint.py
+ create mode 100644 src/lib389/lib389/tests/mapped_object_lint_test.py
+
+diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
+index fd9bd87c1..9deda7039 100755
+--- a/src/lib389/cli/dsctl
++++ b/src/lib389/cli/dsctl
+@@ -64,6 +64,7 @@ cli_dbgen.create_parser(subparsers)
+ 
+ argcomplete.autocomplete(parser)
+ 
++
+ # handle a control-c gracefully
+ def signal_handler(signal, frame):
+     print('\n\nExiting...')
+diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
+index ce0ebfeb8..c60837601 100644
+--- a/src/lib389/lib389/_mapped_object.py
++++ b/src/lib389/lib389/_mapped_object.py
+@@ -15,6 +15,7 @@ import json
+ from functools import partial
+ from lib389._entry import Entry
+ from lib389._constants import DIRSRV_STATE_ONLINE
++from lib389._mapped_object_lint import DSLint, DSLints
+ from lib389.utils import (
+         ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str,
+         ensure_list_int, display_log_value, display_log_data
+@@ -82,7 +83,7 @@ class DSLogging(object):
+             self._log.setLevel(logging.INFO)
+ 
+ 
+-class DSLdapObject(DSLogging):
++class DSLdapObject(DSLogging, DSLint):
+     """A single instance of DSLdapObjects
+ 
+     :param instance: An instance
+@@ -107,7 +108,6 @@ class DSLdapObject(DSLogging):
+         self._must_attributes = None
+         # attributes, we don't want to compare
+         self._compare_exclude = ['entryid', 'modifytimestamp', 'nsuniqueid']
+-        self._lint_functions = None
+         self._server_controls = None
+         self._client_controls = None
+         self._object_filter = '(objectClass=*)'
+@@ -985,38 +985,10 @@ class DSLdapObject(DSLogging):
+         """
+         return self._create(rdn, properties, basedn, ensure=True)
+ 
+-    def lint(self):
+-        """Override this to create a linter for a type. This means that we can detect
+-        and report common administrative errors in the server from our cli and
+-        rest tools.
+-
+-        The structure of a result is::
+-
+-          {
+-            dsle: '<identifier>'. dsle == ds lint error. Will be a code unique to
+-                                this module for the error, IE DSBLE0001.
+-            severity: '[HIGH:MEDIUM:LOW]'. severity of the error.
+-            items: '(dn,dn,dn)'. List of affected DNs or names.
+-            detail: 'msg ...'. An explination of the error.
+-            fix: 'msg ...'. Steps to resolve the error.
+-          }
+-
+-        :returns: An array of these dicts, on None if there are no errors.
+-        """
+-
+-        if not self._lint_functions:
+-            return None
+-        results = []
+-        for fn in self._lint_functions:
+-            for result in fn():
+-                if result is not None:
+-                    results.append(result)
+-        return results
+-
+ 
+ # A challenge of this, is how do we manage indexes? They have two naming attributes....
+ 
+-class DSLdapObjects(DSLogging):
++class DSLdapObjects(DSLogging, DSLints):
+     """The object represents the next idea: "Everything is an instance of something
+     that exists in this way", i.e. we unite LDAP entries by some
+     set of parameters with the object.
+diff --git a/src/lib389/lib389/_mapped_object_lint.py b/src/lib389/lib389/_mapped_object_lint.py
+new file mode 100644
+index 000000000..2d03de98f
+--- /dev/null
++++ b/src/lib389/lib389/_mapped_object_lint.py
+@@ -0,0 +1,157 @@
++from abc import ABC, abstractmethod
++from functools import partial
++from inspect import signature
++from typing import (
++    Callable,
++    List,
++    Optional,
++    Tuple,
++    Union,
++    Type,
++    Generator,
++    Any
++)
++
++
++DSLintSpec = Tuple[str, Callable]
++DSLintParsedSpec = Tuple[Optional[str], Optional[str]]
++DSLintClassSpec = Generator[DSLintSpec, None, None]
++DSLintMethodSpec = Union[str, None, Type[List]]
++DSLintResults = Generator[Any, None, None]
++
++
++class DSLint():
++    """In a super-class, create a method with name beginning with `_lint_`
++    which would yield results (as described below). Such a method will
++    then be available to the `lint()` method of the class.
++
++    `lint_list`: takes a spec and yields available lints, recursively
++    `lint`:      takes a spec and runs lints according to it, yielding results if any
++
++    `spec`: is a colon-separated string, with prefix matching a method name and suffix
++            being passed down to the method.
++
++    A class inheriting from this class shall implement a method named `lint_uid()` which
++    returns a pretty name of the object. This is to be used by higher-level code.
++
++    Each lint method's name has to start with the _lint_ prefix. It may accept an optional
++    parameter `spec` in which case:
++    - it has to accept typing.List class as a parameter, in which case it shall yield
++      all possible lint specs for that method
++    - it receives the suffix provided to the `spec` of this class's `lint` method (as mentioned above)
++
++    This means that we can detect and report common administrative errors
++    in the server from our cli and rest tools.
++
++    The structure of a result shall be:
++
++        {
++        dsle: '<identifier>'. dsle == ds lint error. Will be a code unique to
++                            this module for the error, e.g. DSBLE0001.
++        severity: '[HIGH:MEDIUM:LOW]'. severity of the error.
++        items: '(dn,dn,dn)'. List of affected DNs or names.
++        detail: 'msg ...'. An explanation of the error.
++        fix: 'msg ...'. Steps to resolve the error.
++        }
++    """
++
++    @classmethod
++    def _dslint_fname(cls, method: Callable) -> Optional[str]:
++        """Return a pretty name for a method."""
++        if callable(method) and method.__name__.startswith('_lint_'):
++            return method.__name__[len('_lint_'):]
++        else:
++            return None
++
++    @staticmethod
++    def _dslint_parse_spec(spec: Optional[str]) -> DSLintParsedSpec:
++        """Split `spec` to prefix and suffix."""
++        wanted, *rest = spec.split(':', 1) if spec else (None, None)
++        return (wanted if wanted not in [None, '*'] else None,
++                rest[0] if rest else None)
++
++    @classmethod
++    def _dslint_make_spec(cls, method: Callable, spec: Optional[str] = None) -> str:
++        """Build a new spec from prefix (`method` name) and suffix (`spec`)."""
++        fname = cls._dslint_fname(method)
++        return f'{fname}:{spec}' if spec else fname
++
++    def lint_list(self, spec: Optional[str] = None) -> DSLintClassSpec:
++        """Yield specs the object provides.
++
++        This yields, from each lint method, all the specs it can provide.
++        """
++
++        assert hasattr(self, 'lint_uid')
++
++        # Find _lint_ methods
++        # NOTE: There is a caveat: don't you dare try to getattr on a @property, or
++        #       it gets executed. That is why the following line is written this way.
++        fs = [getattr(self, f) for f in dir(self)
++              if f.startswith('_lint_') and self._dslint_fname(getattr(self, f))]
++
++        # Filter according to the `spec`
++        wanted, rest = self._dslint_parse_spec(spec)
++        if wanted:
++            try:
++                fs = [next(filter(lambda f: self._dslint_fname(f) == wanted, fs))]
++            except StopIteration:
++                raise ValueError('there is no such lint function')
++
++        # Yield known specs
++        for f in fs:
++            fspec_t = signature(f).parameters.get('spec', None)
++            if fspec_t:
++                assert fspec_t.annotation == DSLintMethodSpec
++                for fspec in [rest] if rest else f(spec=List):
++                    yield self._dslint_make_spec(f, fspec), partial(f, spec=fspec)
++            else:
++                yield self._dslint_make_spec(f, rest), f
++
++    def lint(self, spec: DSLintMethodSpec = None) -> DSLintResults:
++        """Lint the object according to the `spec`."""
++
++        if spec == List:
++            yield from self.lint_list()
++        else:
++            for fn, f in self.lint_list(spec):
++                yield from f()
++
++
++class DSLints():
++    """This is a meta class to provide lint functionality to classes that provide
++    method `list` which returns list of objects that inherit from DSLint.
++
++    Calling `lint` or `lint_list` method yields from respective object's methods.
++
++    The `spec` is a colon-separated string. Its prefix matches the respective object's
++    `lint_uid` (or all when asterisk); the suffix is passed down to the respective
++    object's method.
++    """
++
++    def lint_list(self, spec: Optional[str] = None) -> DSLintClassSpec:
++        """Yield specs the objects returned by `list` method provide."""
++
++        assert hasattr(self, 'list')
++
++        # Filter according to the `spec`
++        wanted, rest_spec = DSLint._dslint_parse_spec(spec)
++        if wanted in [None, '*']:
++            clss = self.list()
++        else:
++            clss = (cls for cls in self.list() if cls.lint_uid() == wanted)
++
++        # Yield known specs
++        for cls in clss:
++            for fn, f in cls.lint_list(spec=rest_spec):
++                yield (f'{cls.lint_uid()}:{fn}',
++                       partial(f, rest_spec) if rest_spec else f)
++
++    def lint(self, spec: DSLintMethodSpec = None) -> DSLintResults:
++        """Lint the objects returned by `list` method according to the `spec`."""
++
++        if spec == List:
++            yield from self.lint_list()
++        else:
++            for obj in self.list():
++                yield from obj.lint()
+diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
+index 4f752f414..8863ad1a8 100644
+--- a/src/lib389/lib389/backend.py
++++ b/src/lib389/lib389/backend.py
+@@ -393,6 +393,10 @@ class BackendLegacy(object):
+         replace = [(ldap.MOD_REPLACE, 'nsslapd-require-index', 'on')]
+         self.modify_s(dn, replace)
+ 
++    @classmethod
++    def lint_uid(cls):
++        return 'backends'
++
+ 
+ class Backend(DSLdapObject):
+     """Backend DSLdapObject with:
+@@ -413,10 +417,12 @@ class Backend(DSLdapObject):
+         self._must_attributes = ['nsslapd-suffix', 'cn']
+         self._create_objectclasses = ['top', 'extensibleObject', BACKEND_OBJECTCLASS_VALUE]
+         self._protected = False
+-        self._lint_functions = [self._lint_mappingtree, self._lint_search, self._lint_virt_attrs]
+         # Check if a mapping tree for this suffix exists.
+         self._mts = MappingTrees(self._instance)
+ 
++    def lint_uid(self):
++        return self.get_attr_val_utf8_l('cn').lower()
++
+     def _lint_virt_attrs(self):
+         """Check if any virtual attribute are incorrectly indexed"""
+         indexes = self.get_indexes()
+@@ -497,7 +503,6 @@ class Backend(DSLdapObject):
+             result = DSBLE0001
+             result['items'] = [bename, ]
+             yield result
+-        return None
+ 
+     def create_sample_entries(self, version):
+         """Creates sample entries under nsslapd-suffix value
+@@ -848,6 +853,10 @@ class Backends(DSLdapObjects):
+         self._childobject = Backend
+         self._basedn = DN_LDBM
+ 
++    @classmethod
++    def lint_uid(cls):
++        return 'backends'
++
+     def import_ldif(self, be_name, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=None, only_core=False,
+                     include_suffixes=None, exclude_suffixes=None):
+         """Do an import of the suffix"""
+diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py
+index 3d15ad85e..6333a753a 100644
+--- a/src/lib389/lib389/cli_ctl/health.py
++++ b/src/lib389/lib389/cli_ctl/health.py
+@@ -7,6 +7,9 @@
+ # --- END COPYRIGHT BLOCK ---
+ 
+ import json
++import re
++from lib389._mapped_object import DSLdapObjects
++from lib389._mapped_object_lint import DSLint
+ from lib389.cli_base import connect_instance, disconnect_instance
+ from lib389.cli_base.dsrc import dsrc_to_ldap, dsrc_arg_concat
+ from lib389.backend import Backends
+@@ -15,17 +18,17 @@ from lib389.monitor import MonitorDiskSpace
+ from lib389.replica import Replica, Changelog5
+ from lib389.nss_ssl import NssSsl
+ from lib389.dseldif import FSChecks, DSEldif
++from lib389 import lint
+ from lib389 import plugins
+ from lib389._constants import DSRC_HOME
++from functools import partial
++from typing import Iterable
+ 
+-# These get all instances, then check them all.
+-CHECK_MANY_OBJECTS = [
+-    Backends,
+-]
+ 
+ # These get single instances and check them.
+ CHECK_OBJECTS = [
+     Config,
++    Backends,
+     Encryption,
+     FSChecks,
+     plugins.ReferentialIntegrityPlugin,
+@@ -52,44 +55,51 @@ def _format_check_output(log, result, idx):
+     log.info(result['fix'])
+ 
+ 
+-def health_check_run(inst, log, args):
+-    """Connect to the local server using LDAPI, and perform various health checks
+-    """
++def _list_targets(inst):
++    for c in CHECK_OBJECTS:
++        o = c(inst)
++        yield o.lint_uid(), o
++
++
++def _list_errors(log):
++    for r in map(partial(getattr, lint),
++                 filter(partial(re.match, r'^DS'),
++                        dir(lint))):
++        log.info(f"{r['dsle']} :: {r['description']}")
+ 
+-    # update the args for connect_instance()
+-    args.basedn = None
+-    args.binddn = None
+-    args.bindpw = None
+-    args.starttls = None
+-    args.pwdfile = None
+-    args.prompt = False
+-    dsrc_inst = dsrc_to_ldap(DSRC_HOME, args.instance, log.getChild('dsrc'))
+-    dsrc_inst = dsrc_arg_concat(args, dsrc_inst)
+-    try:
+-        inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
+-    except Exception as e:
+-        raise ValueError('Failed to connect to Directory Server instance: ' + str(e))
+ 
++def _list_checks(inst, specs: Iterable[str]):
++    o_uids = dict(_list_targets(inst))
++    for s in specs:
++        wanted, rest = DSLint._dslint_parse_spec(s)
++        if wanted == '*':
++            raise ValueError('Unexpected spec selector asterisk')
++
++        if wanted in o_uids:
++            for l in o_uids[wanted].lint_list(rest):
++                yield o_uids[wanted], l
++        else:
++            raise ValueError('No such object specifier')
++
++
++def _print_checks(inst, specs: Iterable[str]) -> None:
++    for o, s in _list_checks(inst, specs):
++        print(f'{o.lint_uid()}:{s[0]}')
++
++
++def _run(inst, log, args, checks):
+     if not args.json:
+         log.info("Beginning lint report, this could take a while ...")
++
+     report = []
+-    for lo in CHECK_MANY_OBJECTS:
++    for o, s in checks:
+         if not args.json:
+-            log.info("Checking %s ..." % lo.__name__)
+-        lo_inst = lo(inst)
+-        for clo in lo_inst.list():
+-            result = clo.lint()
+-            if result is not None:
+-                report += result
+-    for lo in CHECK_OBJECTS:
+-        if not args.json:
+-            log.info("Checking %s ..." % lo.__name__)
+-        lo_inst = lo(inst)
+-        result = lo_inst.lint()
+-        if result is not None:
+-            report += result
++            log.info(f"Checking {o.lint_uid()}:{s[0]} ...")
++        report += o.lint(s[0]) or []
++
+     if not args.json:
+         log.info("Healthcheck complete.")
++
+     count = len(report)
+     if count == 0:
+         if not args.json:
+@@ -110,6 +120,37 @@ def health_check_run(inst, log, args):
+         else:
+             log.info(json.dumps(report, indent=4))
+ 
++
++def health_check_run(inst, log, args):
++    """Connect to the local server using LDAPI, and perform various health checks
++    """
++
++    if args.list_errors:
++        _list_errors(log)
++        return
++
++    # update the args for connect_instance()
++    args.basedn = None
++    args.binddn = None
++    args.bindpw = None
++    args.starttls = None
++    args.pwdfile = None
++    args.prompt = False
++    dsrc_inst = dsrc_to_ldap(DSRC_HOME, args.instance, log.getChild('dsrc'))
++    dsrc_inst = dsrc_arg_concat(args, dsrc_inst)
++    try:
++        inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
++    except Exception as e:
++        raise ValueError('Failed to connect to Directory Server instance: ' + str(e))
++
++    checks = args.check or dict(_list_targets(inst)).keys()
++
++    if args.list_checks or args.dry_run:
++        _print_checks(inst, checks)
++        return
++
++    _run(inst, log, args, _list_checks(inst, checks))
++
+     disconnect_instance(inst)
+ 
+ 
+@@ -120,4 +161,9 @@ def create_parser(subparsers):
+         "remote Directory Server as this tool needs access to local resources, "
+         "otherwise the report may be inaccurate.")
+     run_healthcheck_parser.set_defaults(func=health_check_run)
+-
++    run_healthcheck_parser.add_argument('--list-checks', action='store_true', help='List of known checks')
++    run_healthcheck_parser.add_argument('--list-errors', action='store_true', help='List of known error codes')
++    run_healthcheck_parser.add_argument('--dry-run', action='store_true', help='Do not execute the actual check, only list what would be done')
++    run_healthcheck_parser.add_argument('--check', nargs='+', default=None,
++                                        help='Areas to check. These can be obtained by --list-checks. Every element on the left of the colon (:)'
++                                             ' may be replaced by an asterisk if multiple options on the right are available.')
+diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
+index a29d0244c..aa4c92beb 100644
+--- a/src/lib389/lib389/config.py
++++ b/src/lib389/lib389/config.py
+@@ -54,7 +54,6 @@ class Config(DSLdapObject):
+         ]
+         self._compare_exclude  = self._compare_exclude + config_compare_exclude
+         self._rdn_attribute = 'cn'
+-        self._lint_functions = [self._lint_hr_timestamp, self._lint_passwordscheme]
+ 
+     @property
+     def dn(self):
+@@ -197,6 +196,10 @@ class Config(DSLdapObject):
+         fields = 'nsslapd-security nsslapd-ssl-check-hostname'.split()
+         return self._instance.getEntry(DN_CONFIG, attrlist=fields)
+ 
++    @classmethod
++    def lint_uid(cls):
++        return 'config'
++
+     def _lint_hr_timestamp(self):
+         hr_timestamp = self.get_attr_val('nsslapd-logging-hr-timestamps-enabled')
+         if ensure_bytes('on') != hr_timestamp:
+@@ -242,20 +245,22 @@ class Encryption(DSLdapObject):
+         self._rdn_attribute = 'cn'
+         self._must_attributes = ['cn']
+         self._protected = True
+-        self._lint_functions = [self._lint_check_tls_version]
+ 
+     def create(self, rdn=None, properties={'cn': 'encryption', 'nsSSLClientAuth': 'allowed'}):
+         if rdn is not None:
+             self._log.debug("dn on cn=encryption is not None. This is a mistake.")
+         super(Encryption, self).create(properties=properties)
+ 
++    @classmethod
++    def lint_uid(cls):
++        return 'encryption'
++
+     def _lint_check_tls_version(self):
+         tls_min = self.get_attr_val('sslVersionMin')
+         if tls_min is not None and tls_min < ensure_bytes('TLS1.1'):
+             report = copy.deepcopy(DSELE0001)
+             report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
+             yield report
+-        yield None
+ 
+     @property
+     def ciphers(self):
+@@ -487,7 +492,6 @@ class LDBMConfig(DSLdapObject):
+         self._dn = DN_CONFIG_LDBM
+         # config_compare_exclude = []
+         self._rdn_attribute = 'cn'
+-        self._lint_functions = []
+         self._protected = True
+ 
+ 
+@@ -506,5 +510,4 @@ class BDB_LDBMConfig(DSLdapObject):
+         self._dn = DN_CONFIG_LDBM_BDB
+         self._config_compare_exclude = []
+         self._rdn_attribute = 'cn'
+-        self._lint_functions = []
+         self._protected = True
+diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
+index 5378e6ee9..96c9af9d1 100644
+--- a/src/lib389/lib389/dseldif.py
++++ b/src/lib389/lib389/dseldif.py
+@@ -16,6 +16,7 @@ from datetime import timedelta
+ from stat import ST_MODE
+ # from lib389.utils import print_nice_time
+ from lib389.paths import Paths
++from lib389._mapped_object_lint import DSLint
+ from lib389.lint import (
+     DSPERMLE0001,
+     DSPERMLE0002,
+@@ -25,7 +26,7 @@ from lib389.lint import (
+ )
+ 
+ 
+-class DSEldif(object):
++class DSEldif(DSLint):
+     """A class for working with dse.ldif file
+ 
+     :param instance: An instance
+@@ -58,15 +59,10 @@ class DSEldif(object):
+                         processed_line = line
+                 else:
+                     processed_line = processed_line[:-1] + line[1:]
+-        self._lint_functions = [self._lint_nsstate]
+ 
+-    def lint(self):
+-        results = []
+-        for fn in self._lint_functions:
+-            for result in fn():
+-                if result is not None:
+-                    results.append(result)
+-        return results
++    @classmethod
++    def lint_uid(cls):
++        return 'dseldif'
+ 
+     def _lint_nsstate(self):
+         suffixes = self.readNsState()
+@@ -320,7 +316,7 @@ class DSEldif(object):
+         return states
+ 
+ 
+-class FSChecks(object):
++class FSChecks(DSLint):
+     """This is for the healthcheck feature, check commonly used system config files the
+     server uses.  This is here for lack of a better place to add this class.
+     """
+@@ -344,17 +340,10 @@ class FSChecks(object):
+                 'report': DSPERMLE0002
+             },
+         ]
+-        self._lint_functions = [self._lint_file_perms]
+ 
+-    def lint(self):
+-        """Run a lint/healthcheck for this class
+-        """
+-        results = []
+-        for fn in self._lint_functions:
+-            for result in fn():
+-                if result is not None:
+-                    results.append(result)
+-        return results
++    @classmethod
++    def lint_uid(cls):
++        return 'fschecks'
+ 
+     def _lint_file_perms(self):
+         """Test file permissions are safe
+diff --git a/src/lib389/lib389/encrypted_attributes.py b/src/lib389/lib389/encrypted_attributes.py
+index 9afd2e66b..2fa26cef9 100644
+--- a/src/lib389/lib389/encrypted_attributes.py
++++ b/src/lib389/lib389/encrypted_attributes.py
+@@ -27,7 +27,6 @@ class EncryptedAttr(DSLdapObject):
+         self._must_attributes = ['cn', 'nsEncryptionAlgorithm']
+         self._create_objectclasses = ['top', 'nsAttributeEncryption']
+         self._protected = False
+-        self._lint_functions = []
+ 
+ 
+ class EncryptedAttrs(DSLdapObjects):
+diff --git a/src/lib389/lib389/index.py b/src/lib389/lib389/index.py
+index 6932883b7..a3d019d27 100644
+--- a/src/lib389/lib389/index.py
++++ b/src/lib389/lib389/index.py
+@@ -41,7 +41,6 @@ class Index(DSLdapObject):
+         self._must_attributes = ['cn', 'nsSystemIndex', 'nsIndexType']
+         self._create_objectclasses = ['top', 'nsIndex']
+         self._protected = False
+-        self._lint_functions = []
+ 
+ 
+ class Indexes(DSLdapObjects):
+@@ -77,7 +76,6 @@ class VLVSearch(DSLdapObject):
+         self._must_attributes = ['cn', 'vlvbase', 'vlvscope', 'vlvfilter']
+         self._create_objectclasses = ['top', 'vlvSearch']
+         self._protected = False
+-        self._lint_functions = []
+         self._be_name = None
+ 
+     def get_sorts(self):
+@@ -163,7 +161,6 @@ class VLVIndex(DSLdapObject):
+         self._must_attributes = ['cn', 'vlvsort']
+         self._create_objectclasses = ['top', 'vlvIndex']
+         self._protected = False
+-        self._lint_functions = []
+ 
+ 
+ class VLVIndexes(DSLdapObjects):
+diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
+index b5a305bc3..a103feec7 100644
+--- a/src/lib389/lib389/lint.py
++++ b/src/lib389/lib389/lint.py
+@@ -14,8 +14,9 @@
+ DSBLE0001 = {
+     'dsle': 'DSBLE0001',
+     'severity': 'MEDIUM',
+-    'items' : [],
+-    'detail' : """This backend may be missing the correct mapping tree references. Mapping Trees allow
++    'description': 'Possibly incorrect mapping tree.',
++    'items': [],
++    'detail': """This backend may be missing the correct mapping tree references. Mapping Trees allow
+ the directory server to determine which backend an operation is routed to in the
+ abscence of other information. This is extremely important for correct functioning
+ of LDAP ADD for example.
+@@ -32,7 +33,7 @@ objectClass: extensibleObject
+ objectClass: nsMappingTree
+ 
+ """,
+-    'fix' : """Either you need to create the mapping tree, or you need to repair the related
++    'fix': """Either you need to create the mapping tree, or you need to repair the related
+ mapping tree. You will need to do this by hand by editing cn=config, or stopping
+ the instance and editing dse.ldif.
+ """
+@@ -41,25 +42,28 @@ the instance and editing dse.ldif.
+ DSBLE0002 = {
+     'dsle': 'DSBLE0002',
+     'severity': 'HIGH',
+-    'items' : [],
+-    'detail' : """Unable to query the backend.  LDAP error (ERROR)""",
+-    'fix' : """Check the server's error and access logs for more information."""
++    'description': 'Unable to query backend.',
++    'items': [],
++    'detail': """Unable to query the backend.  LDAP error (ERROR)""",
++    'fix': """Check the server's error and access logs for more information."""
+ }
+ 
+ DSBLE0003 = {
+     'dsle': 'DSBLE0003',
+     'severity': 'LOW',
+-    'items' : [],
+-    'detail' : """The backend database has not been initialized yet""",
+-    'fix' : """You need to import an LDIF file, or create the suffix entry, in order to initialize the database."""
++    'description': 'Uninitialized backend database.',
++    'items': [],
++    'detail': """The backend database has not been initialized yet""",
++    'fix': """You need to import an LDIF file, or create the suffix entry, in order to initialize the database."""
+ }
+ 
+ # Config checks
+ DSCLE0001 = {
+-    'dsle' : 'DSCLE0001',
+-    'severity' : 'LOW',
++    'dsle': 'DSCLE0001',
++    'severity': 'LOW',
++    'description': 'Different log timestamp format.',
+     'items': ['cn=config', ],
+-    'detail' : """nsslapd-logging-hr-timestamps-enabled changes the log format in directory server from
++    'detail': """nsslapd-logging-hr-timestamps-enabled changes the log format in directory server from
+ 
+ [07/Jun/2017:17:15:58 +1000]
+ 
+@@ -70,7 +74,7 @@ to
+ This actually provides a performance improvement. Additionally, this setting will be
+ removed in a future release.
+ """,
+-    'fix' : """Set nsslapd-logging-hr-timestamps-enabled to on.
++    'fix': """Set nsslapd-logging-hr-timestamps-enabled to on.
+ You can use 'dsconf' to set this attribute.  Here is an example:
+ 
+     # dsconf slapd-YOUR_INSTANCE config replace nsslapd-logging-hr-timestamps-enabled=on"""
+@@ -79,8 +83,9 @@ You can use 'dsconf' to set this attribute.  Here is an example:
+ DSCLE0002 = {
+     'dsle': 'DSCLE0002',
+     'severity': 'HIGH',
+-    'items' : ['cn=config', ],
+-    'detail' : """Password storage schemes in Directory Server define how passwords are hashed via a
++    'description': 'Weak passwordStorageScheme.',
++    'items': ['cn=config', ],
++    'detail': """Password storage schemes in Directory Server define how passwords are hashed via a
+ one-way mathematical function for storage. Knowing the hash it is difficult to gain
+ the input, but knowing the input you can easily compare the hash.
+ 
+@@ -112,14 +117,15 @@ You can also use 'dsconf' to replace these values.  Here is an example:
+ DSELE0001 = {
+     'dsle': 'DSELE0001',
+     'severity': 'MEDIUM',
+-    'items' : ['cn=encryption,cn=config', ],
++    'description': 'Weak TLS protocol version.',
++    'items': ['cn=encryption,cn=config', ],
+     'detail': """This Directory Server may not be using strong TLS protocol versions. TLS1.0 is known to
+ have a number of issues with the protocol. Please see:
+ 
+ https://tools.ietf.org/html/rfc7457
+ 
+ It is advised you set this value to the maximum possible.""",
+-    'fix' : """There are two options for setting the TLS minimum version allowed.  You,
++    'fix': """There are two options for setting the TLS minimum version allowed.  You,
+ can set "sslVersionMin" in "cn=encryption,cn=config" to a version greater than "TLS1.0"
+ You can also use 'dsconf' to set this value.  Here is an example:
+ 
+@@ -137,7 +143,8 @@ minimum version, but doing this affects the entire system:
+ DSRILE0001 = {
+     'dsle': 'DSRILE0001',
+     'severity': 'LOW',
+-    'items' : ['cn=referential integrity postoperation,cn=plugins,cn=config', ],
++    'description': 'Referential integrity plugin may be slower.',
++    'items': ['cn=referential integrity postoperation,cn=plugins,cn=config', ],
+     'detail': """The referential integrity plugin has an asynchronous processing mode.
+ This is controlled by the update-delay flag.  When this value is 0, referential
+ integrity plugin processes these changes inside of the operation that modified
+@@ -151,7 +158,7 @@ delays to your server by batching changes rather than smaller updates during syn
+ 
+ We advise that you set this value to 0, and enable referint on all masters as it provides a more predictable behaviour.
+ """,
+-    'fix' : """Set referint-update-delay to 0.
++    'fix': """Set referint-update-delay to 0.
+ 
+ You can use 'dsconf' to set this value.  Here is an example:
+ 
+@@ -164,12 +171,13 @@ You must restart the Directory Server for this change to take effect."""
+ DSRILE0002 = {
+     'dsle': 'DSRILE0002',
+     'severity': 'HIGH',
+-    'items' : ['cn=referential integrity postoperation,cn=plugins,cn=config'],
++    'description': 'Referential integrity plugin configured with unindexed attribute.',
++    'items': ['cn=referential integrity postoperation,cn=plugins,cn=config'],
+     'detail': """The referential integrity plugin is configured to use an attribute (ATTR)
+ that does not have an "equality" index in backend (BACKEND).
+ Failure to have the proper indexing will lead to unindexed searches which
+ cause high CPU and can significantly slow the server down.""",
+-    'fix' : """Check the attributes set in "referint-membership-attr" to make sure they have
++    'fix': """Check the attributes set in "referint-membership-attr" to make sure they have
+ an index defined that has at least the equality "eq" index type.  You will
+ need to reindex the database after adding the missing index type. Here is an
+ example using dsconf:
+@@ -182,12 +190,13 @@ example using dsconf:
+ DSDSLE0001 = {
+     'dsle': 'DSDSLE0001',
+     'severity': 'HIGH',
+-    'items' : ['Server', 'cn=config'],
++    'description': 'Low disk space.',
++    'items': ['Server', 'cn=config'],
+     'detail': """The disk partition used by the server (PARTITION), either for the database, the
+ configuration files, or the logs is over 90% full.  If the partition becomes
+ completely filled serious problems can occur with the database or the server's
+ stability.""",
+-    'fix' : """Attempt to free up disk space.  Also try removing old rotated logs, or disable any
++    'fix': """Attempt to free up disk space.  Also try removing old rotated logs, or disable any
+ verbose logging levels that might have been set.  You might consider enabling
+ the "Disk Monitoring" feature in cn=config to help prevent a disorderly shutdown
+ of the server:
+@@ -210,9 +219,10 @@ Please see the Administration guide for more information:
+ DSREPLLE0001 = {
+     'dsle': 'DSREPLLE0001',
+     'severity': 'HIGH',
+-    'items' : ['Replication', 'Agreement'],
++    'description': 'Replication agreement not set to be synchronized.',
++    'items': ['Replication', 'Agreement'],
+     'detail': """The replication agreement (AGMT) under "SUFFIX" is not in synchronization.""",
+-    'fix' : """You may need to reinitialize this replication agreement.  Please check the errors
++    'fix': """You may need to reinitialize this replication agreement.  Please check the errors
+ log for more information.  If you do need to reinitialize the agreement you can do so
+ using dsconf.  Here is an example:
+ 
+@@ -223,9 +233,10 @@ using dsconf.  Here is an example:
+ DSREPLLE0002 = {
+     'dsle': 'DSREPLLE0002',
+     'severity': 'LOW',
+-    'items' : ['Replication', 'Conflict Entries'],
++    'description': 'Replication conflict entries found.',
++    'items': ['Replication', 'Conflict Entries'],
+     'detail': "There were COUNT conflict entries found under the replication suffix \"SUFFIX\".",
+-    'fix' : """While conflict entries are expected to occur in an MMR environment, they
++    'fix': """While conflict entries are expected to occur in an MMR environment, they
+ should be resolved.  In regards to conflict entries there is always the original/counterpart
+ entry that has a normal DN, and then the conflict version of that entry.  Technically both
+ entries are valid, you as the administrator, needs to decide which entry you want to keep.
+@@ -253,38 +264,42 @@ can use the CLI tool "dsconf" to resolve the conflict.  Here is an example:
+ DSREPLLE0003 = {
+     'dsle': 'DSREPLLE0003',
+     'severity': 'MEDIUM',
+-    'items' : ['Replication', 'Agreement'],
++    'description': 'Unsynchronized replication agreement.',
++    'items': ['Replication', 'Agreement'],
+     'detail': """The replication agreement (AGMT) under "SUFFIX" is not in synchronization.
+ Status message: MSG""",
+-    'fix' : """Replication is not in synchronization but it may recover.  Continue to
++    'fix': """Replication is not in synchronization but it may recover.  Continue to
+ monitor this agreement."""
+ }
+ 
+ DSREPLLE0004 = {
+     'dsle': 'DSREPLLE0004',
+     'severity': 'MEDIUM',
+-    'items' : ['Replication', 'Agreement'],
++    'description': 'Unable to get replication agreement status.',
++    'items': ['Replication', 'Agreement'],
+     'detail': """Failed to get the agreement status for agreement (AGMT) under "SUFFIX".  Error (ERROR).""",
+-    'fix' : """None"""
++    'fix': """None"""
+ }
+ 
+ DSREPLLE0005 = {
+     'dsle': 'DSREPLLE0005',
+     'severity': 'MEDIUM',
+-    'items' : ['Replication', 'Agreement'],
++    'description': 'Replication consumer not reachable.',
++    'items': ['Replication', 'Agreement'],
+     'detail': """The replication agreement (AGMT) under "SUFFIX" is not in synchronization,
+ because the consumer server is not reachable.""",
+-    'fix' : """Check if the consumer is running, and also check the errors log for more information."""
++    'fix': """Check if the consumer is running, and also check the errors log for more information."""
+ }
+ 
+ # Replication changelog
+ DSCLLE0001 = {
+     'dsle': 'DSCLLE0001',
+     'severity': 'LOW',
+-    'items' : ['Replication', 'Changelog'],
++    'description': 'Changelog trimming not configured.',
++    'items': ['Replication', 'Changelog'],
+     'detail': """The replication changelog does have any kind of trimming configured.  This will
+ lead to the changelog size growing indefinitely.""",
+-    'fix' : """Configure changelog trimming, preferably by setting the maximum age of a changelog
++    'fix': """Configure changelog trimming, preferably by setting the maximum age of a changelog
+ record.  Here is an example:
+ 
+     # dsconf slapd-YOUR_INSTANCE replication set-changelog --max-age 30d"""
+@@ -294,27 +309,30 @@ record.  Here is an example:
+ DSCERTLE0001 = {
+     'dsle': 'DSCERTLE0001',
+     'severity': 'MEDIUM',
+-    'items' : ['Expiring Certificate'],
++    'description': 'Certificate about to expire.',
++    'items': ['Expiring Certificate'],
+     'detail': """The certificate (CERT) will expire in less than 30 days""",
+-    'fix' : """Renew the certificate before it expires to prevent disruptions with TLS connections."""
++    'fix': """Renew the certificate before it expires to prevent disruptions with TLS connections."""
+ }
+ 
+ DSCERTLE0002 = {
+     'dsle': 'DSCERTLE0002',
+     'severity': 'HIGH',
+-    'items' : ['Expired Certificate'],
++    'description': 'Certificate expired.',
++    'items': ['Expired Certificate'],
+     'detail': """The certificate (CERT) has expired""",
+-    'fix' : """Renew or remove the certificate."""
++    'fix': """Renew or remove the certificate."""
+ }
+ 
+ # Virtual Attrs & COS.  Note - ATTR and SUFFIX are replaced by the reporting function
+ DSVIRTLE0001 = {
+     'dsle': 'DSVIRTLE0001',
+     'severity': 'HIGH',
+-    'items' : ['Virtual Attributes'],
++    'description': 'Virtual attribute indexed.',
++    'items': ['Virtual Attributes'],
+     'detail': """You should not index virtual attributes, and as this will break searches that
+ use the attribute in a filter.""",
+-    'fix' : """Remove the index for this attribute from the backend configuration.
++    'fix': """Remove the index for this attribute from the backend configuration.
+ Here is an example using 'dsconf' to remove an index:
+ 
+     # dsconf slapd-YOUR_INSTANCE backend index delete --attr ATTR SUFFIX"""
+@@ -324,10 +342,11 @@ Here is an example using 'dsconf' to remove an index:
+ DSPERMLE0001 = {
+     'dsle': 'DSPERMLE0001',
+     'severity': 'MEDIUM',
+-    'items' : ['File Permissions'],
++    'description': 'Incorrect file permissions.',
++    'items': ['File Permissions'],
+     'detail': """The file "FILE" does not have the expected permissions (PERMS).  This
+ can cause issues with replication and chaining.""",
+-    'fix' : """Change the file permissions:
++    'fix': """Change the file permissions:
+ 
+     # chmod PERMS FILE"""
+ }
+@@ -336,10 +355,11 @@ can cause issues with replication and chaining.""",
+ DSPERMLE0002 = {
+     'dsle': 'DSPERMLE0002',
+     'severity': 'HIGH',
+-    'items' : ['File Permissions'],
++    'description': 'Incorrect security database file permissions.',
++    'items': ['File Permissions'],
+     'detail': """The file "FILE" does not have the expected permissions (PERMS).  The
+ security database pin/password files should only be readable by Directory Server user.""",
+-    'fix' : """Change the file permissions:
++    'fix': """Change the file permissions:
+ 
+     # chmod PERMS FILE"""
+ }
+@@ -348,11 +368,12 @@ security database pin/password files should only be readable by Directory Server
+ DSSKEWLE0001 = {
+     'dsle': 'DSSKEWLE0001',
+     'severity': 'Low',
+-    'items' : ['Replication'],
++    'description': 'Medium time skew.',
++    'items': ['Replication'],
+     'detail': """The time skew is over 6 hours.  If this time skew continues to increase
+ to 24 hours then replication can potentially stop working.  Please continue to
+ monitor the time skew offsets for increasing values.""",
+-    'fix' : """Monitor the time skew and avoid making changes to the system time.
++    'fix': """Monitor the time skew and avoid making changes to the system time.
+ Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems
+ and find the paragraph "Too much time skew"."""
+ }
+@@ -360,13 +381,14 @@ and find the paragraph "Too much time skew"."""
+ DSSKEWLE0002 = {
+     'dsle': 'DSSKEWLE0002',
+     'severity': 'Medium',
+-    'items' : ['Replication'],
++    'description': 'Major time skew.',
++    'items': ['Replication'],
+     'detail': """The time skew is over 12 hours.  If this time skew continues to increase
+ to 24 hours then replication can potentially stop working.  Please continue to
+ monitor the time skew offsets for increasing values.  Setting nsslapd-ignore-time-skew
+ to "on" on each replica will allow replication to continue, but if the time skew
+ continues to increase other more serious replication problems can occur.""",
+-    'fix' : """Monitor the time skew and avoid making changes to the system time.
++    'fix': """Monitor the time skew and avoid making changes to the system time.
+ If you get close to 24 hours of time skew replication may stop working.
+ In that case configure the server to ignore the time skew until the system
+ times can be fixed/synchronized:
+@@ -380,12 +402,13 @@ and find the paragraph "Too much time skew"."""
+ DSSKEWLE0003 = {
+     'dsle': 'DSSKEWLE0003',
+     'severity': 'High',
+-    'items' : ['Replication'],
++    'description': 'Extensive time skew.',
++    'items': ['Replication'],
+     'detail': """The time skew is over 24 hours.  Setting nsslapd-ignore-time-skew
+ to "on" on each replica will allow replication to continue, but if the
+ time skew continues to increase other serious replication problems can
+ occur.""",
+-    'fix' : """Avoid making changes to the system time, and make sure the clocks
++    'fix': """Avoid making changes to the system time, and make sure the clocks
+ on all the replicas are correct.  If you haven't set the server's
+ "ignore time skew" setting then do the following on all the replicas
+ until the time issues have been resolved:
+diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
+index 73750c3c2..4ac7d7174 100644
+--- a/src/lib389/lib389/monitor.py
++++ b/src/lib389/lib389/monitor.py
+@@ -358,7 +358,10 @@ class MonitorDiskSpace(DSLdapObject):
+     def __init__(self, instance, dn=None):
+         super(MonitorDiskSpace, self).__init__(instance=instance, dn=dn)
+         self._dn = "cn=disk space,cn=monitor"
+-        self._lint_functions = [self._lint_disk_space]
++
++    @classmethod
++    def lint_uid(cls):
++        return 'monitor-disk-space'
+ 
+     def _lint_disk_space(self):
+         partitions = self.get_attr_vals_utf8_l("dsDisk")
+diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py
+index d14e7ce6f..e257424fd 100644
+--- a/src/lib389/lib389/nss_ssl.py
++++ b/src/lib389/lib389/nss_ssl.py
+@@ -21,6 +21,7 @@ import subprocess
+ from datetime import datetime, timedelta
+ from subprocess import check_output, run, PIPE
+ from lib389.passwd import password_generate
++from lib389._mapped_object_lint import DSLint
+ from lib389.lint import DSCERTLE0001, DSCERTLE0002
+ from lib389.utils import ensure_str, format_cmd_list
+ import uuid
+@@ -42,7 +43,7 @@ VALID_MIN = 61  # Days
+ log = logging.getLogger(__name__)
+ 
+ 
+-class NssSsl(object):
++class NssSsl(DSLint):
+     def __init__(self, dirsrv=None, dbpassword=None, dbpath=None):
+         self.dirsrv = dirsrv
+         self._certdb = dbpath
+@@ -56,18 +57,14 @@ class NssSsl(object):
+         else:
+             self.dbpassword = dbpassword
+ 
+-        self.db_files = {"dbm_backend": ["%s/%s" % (self._certdb, f) for f in ("key3.db", "cert8.db", "secmod.db")],
+-                         "sql_backend": ["%s/%s" % (self._certdb, f) for f in ("key4.db", "cert9.db", "pkcs11.txt")],
+-                         "support": ["%s/%s" % (self._certdb, f) for f in ("noise.txt", PIN_TXT, PWD_TXT)]}
+-        self._lint_functions = [self._lint_certificate_expiration,]
+-
+-    def lint(self):
+-        results = []
+-        for fn in self._lint_functions:
+-            for result in fn():
+-                if result is not None:
+-                    results.append(result)
+-        return results
++        self.db_files = {group: [f"{self._certdb}/{f}" for f in files]
++                         for group, files in {"dbm_backend": ("key3.db", "cert8.db", "secmod.db"),
++                                              "sql_backend": ("key4.db", "cert9.db", "pkcs11.txt"),
++                                              "support": ("noise.txt", PIN_TXT, PWD_TXT)}.items()}
++
++    @classmethod
++    def lint_uid(cls):
++        return 'ssl'
+ 
+     def _lint_certificate_expiration(self):
+         """Check all the certificates in the db if they will expire within 30 days
+diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
+index f68a1d114..89e660287 100644
+--- a/src/lib389/lib389/plugins.py
++++ b/src/lib389/lib389/plugins.py
+@@ -431,7 +431,6 @@ class ReferentialIntegrityPlugin(Plugin):
+             'referint-logfile',
+             'referint-membership-attr',
+         ])
+-        self._lint_functions = [self._lint_update_delay, self._lint_attr_indexes]
+ 
+     def create(self, rdn=None, properties=None, basedn=None):
+         """Create an instance of the plugin"""
+@@ -443,6 +442,10 @@ class ReferentialIntegrityPlugin(Plugin):
+             properties['referint-logfile'] = referint_log
+         return super(ReferentialIntegrityPlugin, self).create(rdn, properties, basedn)
+ 
++    @classmethod
++    def lint_uid(cls):
++        return 'refint'
++
+     def _lint_update_delay(self):
+         if self.status():
+             delay = self.get_attr_val_int("referint-update-delay")
+diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
+index f8adb3ce2..f575e58d5 100644
+--- a/src/lib389/lib389/replica.py
++++ b/src/lib389/lib389/replica.py
+@@ -1049,7 +1049,10 @@ class Changelog5(DSLdapObject):
+                 'extensibleobject',
+             ]
+         self._protected = False
+-        self._lint_functions = [self._lint_cl_trimming]
++
++    @classmethod
++    def lint_uid(cls):
++        return 'changelog'
+ 
+     def _lint_cl_trimming(self):
+         """Check that cl trimming is at least defined to prevent unbounded growth"""
+@@ -1120,7 +1123,10 @@ class Replica(DSLdapObject):
+             self._create_objectclasses.append('extensibleobject')
+         self._protected = False
+         self._suffix = None
+-        self._lint_functions = [self._lint_agmts_status, self._lint_conflicts]
++
++    @classmethod
++    def lint_uid(cls):
++        return 'replication'
+ 
+     def _lint_agmts_status(self):
+         replicas = Replicas(self._instance).list()
+diff --git a/src/lib389/lib389/tests/mapped_object_lint_test.py b/src/lib389/lib389/tests/mapped_object_lint_test.py
+new file mode 100644
+index 000000000..a4ca0ea3c
+--- /dev/null
++++ b/src/lib389/lib389/tests/mapped_object_lint_test.py
+@@ -0,0 +1,78 @@
++from typing import List
++
++import pytest
++
++from lib389._mapped_object_lint import (
++    DSLint,
++    DSLints,
++    DSLintMethodSpec
++)
++
++
++def test_dslint():
++    class DS(DSLint):
++        def lint_uid(self) -> str:
++            return self.param
++
++        def __init__(self, param):
++            self.param = param
++            self.suffixes = ['suffixA', 'suffixB']
++
++        def _lint_nsstate(self, spec: DSLintMethodSpec = None):
++            if spec == List:
++                yield from self.suffixes
++            else:
++                to_lint = [spec] if spec else self._lint_nsstate(spec=List)
++                for tl in to_lint:
++                    if tl == 'suffixA':
++                        pass
++                    elif tl == 'suffixB':
++                        yield 'suffixB is bad'
++                    else:
++                        raise ValueError('There is no such suffix')
++
++        def _lint_second(self):
++            yield from ()
++
++        def _lint_third(self):
++            yield from ['this is a fail']
++
++    class DSs(DSLints):
++        def list(self):
++            for i in [DS("ma"), DS("mb")]:
++                yield i
++
++    # single
++    inst = DS("a")
++    inst_lints = {'nsstate:suffixA', 'nsstate:suffixB', 'second', 'third'}
++
++    assert inst.param == "a"
++
++    assert set(dict(inst.lint_list()).keys()) == inst_lints
++
++    assert set(dict(inst.lint_list('nsstate')).keys()) \
++        == {f'nsstate:suffix{s}' for s in "AB"}
++
++    assert list(inst._lint_nsstate(spec=List)) == ['suffixA', 'suffixB']
++    assert list(inst.lint()) == ['suffixB is bad', 'this is a fail']
++
++    assert list(inst.lint('nsstate')) == ['suffixB is bad']
++    assert list(inst.lint('nsstate:suffixA')) == []
++    assert list(inst.lint('nsstate:suffixB')) == ['suffixB is bad']
++    with pytest.raises(ValueError):
++        list(inst.lint('nonexistent'))
++
++    # multiple
++    insts = DSs()
++
++    assert insts.lint_list
++    assert insts.lint
++
++    assert set(dict(insts.lint_list()).keys()) \
++        == {f'{m}:{s}' for m in ['ma', 'mb'] for s in inst_lints}
++    assert set(dict(insts.lint_list('*')).keys()) \
++        == {f'{m}:{s}' for m in ['ma', 'mb'] for s in inst_lints}
++    assert set(dict(insts.lint_list('*:nsstate')).keys()) \
++        == {f'{m}:nsstate:suffix{s}' for m in ['ma', 'mb'] for s in "AB"}
++    assert set(dict(insts.lint_list('mb:nsstate')).keys()) \
++        == {f'mb:nsstate:suffix{s}' for s in "AB"}
+-- 
+2.26.2
+
diff --git a/SOURCES/0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch b/SOURCES/0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch
new file mode 100644
index 0000000..3e61905
--- /dev/null
+++ b/SOURCES/0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch
@@ -0,0 +1,54 @@
+From 2540354b7eb6fa03db7d36a5b755001b0852aa1b Mon Sep 17 00:00:00 2001
+From: Simon Pichugin <spichugi@redhat.com>
+Date: Thu, 26 Mar 2020 19:33:47 +0100
+Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
+
+Description: Memory leaks are reported by the disk monitoring test suite.
+The direct leak is related to the char **dirs array, which is not freed at all.
+Free the array when we clean up or go to shutdown.
+Fix disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown.
+It should accept a different exception when the instance is not started.
+
+https://pagure.io/389-ds-base/issue/50984
+
+Reviewed by: firstyear (Thanks!)
+---
+ ldap/servers/slapd/daemon.c | 2 --
+ ldap/servers/slapd/main.c   | 1 -
+ 2 files changed, 3 deletions(-)
+
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index a70f40316..542d31037 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -613,7 +613,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
+                         }
+                     }
+                     slapi_ch_array_free(dirs);
+-                    dirs = NULL;
+                     return;
+                 }
+                 /*
+@@ -713,7 +712,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
+             }
+         }
+         slapi_ch_array_free(dirs);
+-        dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
+         g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
+         return;
+ }
+diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
+index e54b8e1c5..1f8b01959 100644
+--- a/ldap/servers/slapd/main.c
++++ b/ldap/servers/slapd/main.c
+@@ -958,7 +958,6 @@ main(int argc, char **argv)
+             goto cleanup;
+         }
+         slapi_ch_array_free(dirs);
+-        dirs = NULL;
+     }
+     /* log the max fd limit as it is typically set in env/systemd */
+     slapi_log_err(SLAPI_LOG_INFO, "main",
+-- 
+2.26.2
+
diff --git a/SOURCES/0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch b/SOURCES/0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch
new file mode 100644
index 0000000..d554989
--- /dev/null
+++ b/SOURCES/0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch
@@ -0,0 +1,52 @@
+From a720e002751815323a295e11e77c56d7ce38314e Mon Sep 17 00:00:00 2001
+From: Simon Pichugin <spichugi@redhat.com>
+Date: Fri, 27 Mar 2020 11:35:55 +0100
+Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
+
+Description: Reset the dirs pointer every time we free it.
+The code may be changed in the future, so we should make it
+more robust.
+
+https://pagure.io/389-ds-base/issue/50984
+
+Reviewed by: spichugi, tbordaz (one line commit rule)
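+
+A minimal sketch of the resulting pattern (names as in the patched functions below):
+
+    slapi_ch_array_free(dirs);
+    dirs = NULL; /* defensive reset in case the surrounding code changes later */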
+---
+ ldap/servers/slapd/daemon.c | 2 ++
+ ldap/servers/slapd/main.c   | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index 542d31037..a70f40316 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -613,6 +613,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
+                         }
+                     }
+                     slapi_ch_array_free(dirs);
++                    dirs = NULL;
+                     return;
+                 }
+                 /*
+@@ -712,6 +713,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
+             }
+         }
+         slapi_ch_array_free(dirs);
++        dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
+         g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
+         return;
+ }
+diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
+index 1f8b01959..e54b8e1c5 100644
+--- a/ldap/servers/slapd/main.c
++++ b/ldap/servers/slapd/main.c
+@@ -958,6 +958,7 @@ main(int argc, char **argv)
+             goto cleanup;
+         }
+         slapi_ch_array_free(dirs);
++        dirs = NULL;
+     }
+     /* log the max fd limit as it is typically set in env/systemd */
+     slapi_log_err(SLAPI_LOG_INFO, "main",
+-- 
+2.26.2
+
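The pattern 0013 restores above is simple but easy to regress: after slapi_ch_array_free() releases the directory list, the local pointer is reset to NULL so any later code path that still looks at dirs fails safely instead of touching freed memory. Below is a minimal, self-contained C sketch of that free-then-NULL idiom; plain free() and a local array_free() helper stand in for the server's slapi_ch_array_free(), so the names are illustrative only and not the 389-ds API.

/* Illustrative sketch (not from the patch): the free-then-NULL idiom that
 * 0013-Issue-50984 applies to the disk-monitoring code.  A plain
 * NULL-terminated string array and free() stand in for slapi_ch_array_free();
 * the point is that resetting the pointer after freeing makes any later
 * (buggy) reuse detectable instead of dereferencing freed memory. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
array_free(char **a)
{
    if (a == NULL) {
        return;
    }
    for (size_t i = 0; a[i] != NULL; i++) {
        free(a[i]);
    }
    free(a);
}

int
main(void)
{
    char **dirs = calloc(3, sizeof(char *));
    dirs[0] = strdup("/var/log/dirsrv");
    dirs[1] = strdup("/var/lib/dirsrv/slapd-localhost/db");
    dirs[2] = NULL;

    array_free(dirs);
    dirs = NULL; /* the reset the patch adds: no dangling pointer left behind */

    /* Any later code path that forgets the array was released now sees NULL
     * and can check for it, rather than reading freed memory. */
    if (dirs == NULL) {
        printf("dirs already released\n");
    }
    return 0;
}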
diff --git a/SOURCES/0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch b/SOURCES/0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch
new file mode 100644
index 0000000..704cff6
--- /dev/null
+++ b/SOURCES/0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch
@@ -0,0 +1,569 @@
+From f60364cd9472edc61e7d327d13dca67eadf0c5b2 Mon Sep 17 00:00:00 2001
+From: Simon Pichugin <simon.pichugin@gmail.com>
+Date: Tue, 28 Apr 2020 23:44:20 +0200
+Subject: [PATCH] Issue 50201 - nsIndexIDListScanLimit accepts any value
+
+Bug Description: Invalid nsIndexIDListScanLimit settings such as
+'limit=2 limit=3' are detected and logged in the error log,
+but the invalid value is still applied to the config entry
+and the operation itself succeeds.
+The impact is limited because the index will then follow
+idlistscanlimit rather than the invalid nsIndexIDListScanLimit definition.
+
+Fix Description: Report the errors to the user when they try to add
+or modify an index config entry with malformed values.
+Change the tests accordingly.
+
+https://pagure.io/389-ds-base/issue/50201
+
+Reviewed by: mreynolds, tbordaz (Thanks!)
+---
+ .../suites/filter/filterscanlimit_test.py     | 87 ++++++++-----------
+ ldap/servers/slapd/back-ldbm/instance.c       |  4 +-
+ ldap/servers/slapd/back-ldbm/ldbm_attr.c      | 33 ++++++-
+ .../slapd/back-ldbm/ldbm_index_config.c       | 59 +++++++++----
+ ldap/servers/slapd/back-ldbm/ldif2ldbm.c      |  2 +-
+ .../servers/slapd/back-ldbm/proto-back-ldbm.h |  2 +-
+ 6 files changed, 114 insertions(+), 73 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
+index dd9c6ee4e..0198f6533 100644
+--- a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
++++ b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
+@@ -11,6 +11,7 @@ This script will test different type of Filers.
+ """
+ 
+ import os
++import ldap
+ import pytest
+ 
+ from lib389._constants import DEFAULT_SUFFIX, PW_DM
+@@ -19,11 +20,10 @@ from lib389.idm.user import UserAccounts
+ from lib389.idm.organizationalunit import OrganizationalUnits
+ from lib389.index import Index
+ from lib389.idm.account import Accounts
+-from lib389.idm.group import UniqueGroups, Group
++from lib389.idm.group import UniqueGroups
+ 
+ pytestmark = pytest.mark.tier1
+ 
+-
+ GIVEN_NAME = 'cn=givenname,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
+ CN_NAME = 'cn=sn,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
+ UNIQMEMBER = 'cn=uniquemember,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
+@@ -39,7 +39,6 @@ LIST_OF_USER_ACCOUNTING = [
+     "Judy Wallace",
+     "Marcus Ward",
+     "Judy McFarland",
+-    "Anuj Hall",
+     "Gern Triplett",
+     "Emanuel Johnson",
+     "Brad Walker",
+@@ -57,7 +56,6 @@ LIST_OF_USER_ACCOUNTING = [
+     "Randy Ulrich",
+     "Richard Francis",
+     "Morgan White",
+-    "Anuj Maddox",
+     "Jody Jensen",
+     "Mike Carter",
+     "Gern Tyler",
+@@ -77,8 +75,6 @@ LIST_OF_USER_HUMAN = [
+     "Robert Daugherty",
+     "Torrey Mason",
+     "Brad Talbot",
+-    "Anuj Jablonski",
+-    "Harry Miller",
+     "Jeffrey Campaigne",
+     "Stephen Triplett",
+     "John Falena",
+@@ -107,8 +103,7 @@ LIST_OF_USER_HUMAN = [
+     "Tobias Schmith",
+     "Jon Goldstein",
+     "Janet Lutz",
+-    "Karl Cope",
+-]
++    "Karl Cope"]
+ 
+ LIST_OF_USER_TESTING = [
+     "Andy Bergin",
+@@ -122,8 +117,7 @@ LIST_OF_USER_TESTING = [
+     "Alan White",
+     "Daniel Ward",
+     "Lee Stockton",
+-    "Matthew Vaughan"
+-]
++    "Matthew Vaughan"]
+ 
+ LIST_OF_USER_DEVELOPMENT = [
+     "Kelly Winters",
+@@ -143,7 +137,6 @@ LIST_OF_USER_DEVELOPMENT = [
+     "Timothy Kelly",
+     "Sue Mason",
+     "Chris Alexander",
+-    "Anuj Jensen",
+     "Martin Talbot",
+     "Scott Farmer",
+     "Allison Jensen",
+@@ -152,9 +145,7 @@ LIST_OF_USER_DEVELOPMENT = [
+     "Dan Langdon",
+     "Ashley Knutson",
+     "Jon Bourke",
+-    "Pete Hunt",
+-
+-]
++    "Pete Hunt"]
+ 
+ LIST_OF_USER_PAYROLL = [
+     "Ashley Chassin",
+@@ -164,12 +155,17 @@ LIST_OF_USER_PAYROLL = [
+     "Patricia Shelton",
+     "Dietrich Swain",
+     "Allison Hunter",
+-    "Anne-Louise Barnes"
++    "Anne-Louise Barnes"]
+ 
+-]
++LIST_OF_USER_PEOPLE = [
++    'Sam Carter',
++    'Tom Morris',
++    'Kevin Vaughan',
++    'Rich Daugherty',
++    'Harry Miller',
++    'Sam Schmith']
+ 
+ 
+-@pytest.mark.skip(reason="https://pagure.io/389-ds-base/issue/50201")
+ def test_invalid_configuration(topo):
+     """"
+     Error handling for invalid configuration
+@@ -190,10 +186,7 @@ def test_invalid_configuration(topo):
+               'limit=0 flags=AND flags=AND',
+               'limit=0 type=eq values=foo values=foo',
+               'limit=0 type=eq values=foo,foo',
+-              'limit=0 type=sub',
+-              'limit=0 type=eq values=notvalid',
+               'limit',
+-              'limit=0 type=eq values=notavaliddn',
+               'limit=0 type=pres values=bogus',
+               'limit=0 type=eq,sub values=bogus',
+               'limit=',
+@@ -203,7 +196,8 @@ def test_invalid_configuration(topo):
+               'limit=-2',
+               'type=eq',
+               'limit=0 type=bogus']:
+-        Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
++        with pytest.raises(ldap.UNWILLING_TO_PERFORM):
++            Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
+ 
+ 
+ def test_idlistscanlimit(topo):
+@@ -247,28 +241,24 @@ def test_idlistscanlimit(topo):
+                  (LIST_OF_USER_HUMAN, users_human),
+                  (LIST_OF_USER_TESTING, users_testing),
+                  (LIST_OF_USER_DEVELOPMENT, users_development),
+-                 (LIST_OF_USER_PAYROLL, users_payroll)]:
++                 (LIST_OF_USER_PAYROLL, users_payroll),
++                 (LIST_OF_USER_PEOPLE, users_people)]:
+         for demo1 in data[0]:
++            fn = demo1.split()[0]
++            sn = demo1.split()[1]
++            uid = ''.join([fn[:1], sn]).lower()
+             data[1].create(properties={
+-                'uid': demo1,
++                'uid': uid,
+                 'cn': demo1,
+-                'sn': demo1.split()[1],
++                'sn': sn,
+                 'uidNumber': str(1000),
+                 'gidNumber': '2000',
+-                'homeDirectory': '/home/' + demo1,
+-                'givenname': demo1.split()[0],
+-                'userpassword': PW_DM
++                'homeDirectory': f'/home/{uid}',
++                'givenname': fn,
++                'userpassword': PW_DM,
++                'mail': f'{uid}@test.com'
+             })
+ 
+-    users_people.create(properties={
+-        'uid': 'scarter',
+-        'cn': 'Sam Carter',
+-        'sn': 'Carter',
+-        'uidNumber': str(1000),
+-        'gidNumber': '2000',
+-        'homeDirectory': '/home/' + 'scarter',
+-        'mail': 'scarter@anuj.com',
+-    })
+     try:
+         # Change log levels
+         errorlog_value = topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-level')
+@@ -297,16 +287,12 @@ def test_idlistscanlimit(topo):
+ 
+         Index(topo.standalone, UNIQMEMBER).\
+         replace('nsIndexIDListScanLimit',
+-                'limit=0 type=eq values=uid=kvaughan,ou=People,'
+-                'dc=example,dc=com,uid=rdaugherty,ou=People,dc=example,dc=com')
++                'limit=0 type=eq values=uid=kvaughan\2Cou=People\2Cdc=example\2Cdc=com,'
++                'uid=rdaugherty\2Cou=People\2Cdc=example\2Cdc=com')
+ 
+         Index(topo.standalone, OBJECTCLASS).\
+         replace('nsIndexIDListScanLimit', 'limit=0 type=eq flags=AND values=inetOrgPerson')
+ 
+-        Index(topo.standalone, MAIL).\
+-        replace('nsIndexIDListScanLimit',
+-                'cn=mail,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config')
+-
+         # Search with filter
+         for i in ['(sn=Lutz)',
+                   '(sn=*ter)',
+@@ -321,22 +307,24 @@ def test_idlistscanlimit(topo):
+                   '(&(sn=*)(cn=*))',
+                   '(sn=Hunter)',
+                   '(&(givenname=Richard)(objectclass=organizationalPerson))',
+-                  '(givenname=Anuj)',
++                  '(givenname=Morgan)',
+                   '(&(givenname=*)(cn=*))',
+                   '(givenname=*)']:
+             assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(f'{i}')
+ 
+-        # Creating Group
+-        Group(topo.standalone, 'cn=Accounting Managers,ou=groups,dc=example,dc=com').\
+-        add('uniquemember',
++        # Creating Groups and adding members
++        groups = UniqueGroups(topo.standalone, DEFAULT_SUFFIX)
++        accounting_managers = groups.ensure_state(properties={'cn': 'Accounting Managers'})
++        hr_managers = groups.ensure_state(properties={'cn': 'HR Managers'})
++
++        accounting_managers.add('uniquemember',
+             ['uid=scarter, ou=People, dc=example,dc=com',
+              'uid=tmorris, ou=People, dc=example,dc=com',
+              'uid=kvaughan, ou=People, dc=example,dc=com',
+              'uid=rdaugherty, ou=People, dc=example,dc=com',
+              'uid=hmiller, ou=People, dc=example,dc=com'])
+ 
+-        Group(topo.standalone, 'cn=HR Managers,ou=groups,dc=example,dc=com').\
+-        add('uniquemember',
++        hr_managers.add('uniquemember',
+             ['uid=kvaughan, ou=People, dc=example,dc=com',
+              'uid=cschmith, ou=People, dc=example,dc=com'])
+ 
+@@ -403,10 +391,9 @@ def test_idlistscanlimit(topo):
+                       '(&(sn=*)(cn=*))',
+                       '(sn=Hunter)',
+                       '(&(givenname=Richard)(objectclass=organizationalPerson))',
+-                      '(givenname=Anuj)',
++                      '(givenname=Morgan)',
+                       '(&(givenname=*)(cn=*))',
+                       '(givenname=*)']:
+-
+             assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(value)
+ 
+     finally:
+diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c
+index 04c28ff39..07655a8ec 100644
+--- a/ldap/servers/slapd/back-ldbm/instance.c
++++ b/ldap/servers/slapd/back-ldbm/instance.c
+@@ -231,7 +231,7 @@ ldbm_instance_create_default_indexes(backend *be)
+ 
+     /* ldbm_instance_config_add_index_entry(inst, 2, argv); */
+     e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 0, 0);
+-    attr_index_config(be, "ldbm index init", 0, e, 1, 0);
++    attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
+     slapi_entry_free(e);
+ 
+     if (!entryrdn_get_noancestorid()) {
+@@ -240,7 +240,7 @@ ldbm_instance_create_default_indexes(backend *be)
+          * but we still want to use the attr index file APIs.
+          */
+         e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0);
+-        attr_index_config(be, "ldbm index init", 0, e, 1, 0);
++        attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
+         slapi_entry_free(e);
+     }
+ 
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+index b9e130d77..f0d418572 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+@@ -633,6 +633,18 @@ attr_index_idlistsize_config(Slapi_Entry *e, struct attrinfo *ai, char *returnte
+     return rc;
+ }
+ 
++/*
++ * Function that process index attributes and modifies attrinfo structure
++ *
++ * Called while adding default indexes, during db2index execution and
++ * when we add/modify/delete index config entry
++ *
++ * If char *err_buf is not NULL, it will additionally print all error messages to STDERR
++ * It is used when we add/modify/delete index config entry, so the user would have a better verbose
++ *
++ * returns -1, 1 on a failure
++ *         0 on success
++ */
+ int
+ attr_index_config(
+     backend *be,
+@@ -640,7 +652,8 @@ attr_index_config(
+     int lineno,
+     Slapi_Entry *e,
+     int init __attribute__((unused)),
+-    int indextype_none)
++    int indextype_none,
++    char *err_buf)
+ {
+     ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+     int j = 0;
+@@ -662,6 +675,7 @@ attr_index_config(
+         slapi_attr_first_value(attr, &sval);
+         attrValue = slapi_value_get_berval(sval);
+     } else {
++        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing indexing arguments\n");
+         slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing indexing arguments\n");
+         return -1;
+     }
+@@ -705,6 +719,10 @@ attr_index_config(
+                 }
+                 a->ai_indexmask = INDEX_OFFLINE; /* note that the index isn't available */
+             } else {
++                slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
++                                      "Error: %s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
++                                      "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
++                                      fname, lineno, attrValue->bv_val, slapi_entry_get_dn(e));
+                 slapi_log_err(SLAPI_LOG_ERR, "attr_index_config",
+                               "%s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
+                               "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
+@@ -715,6 +733,7 @@ attr_index_config(
+         }
+         if (hasIndexType == 0) {
+             /* indexType missing, error out */
++            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing index type\n");
+             slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing index type\n");
+             attrinfo_delete(&a);
+             return -1;
+@@ -873,16 +892,26 @@ attr_index_config(
+             slapi_ch_free((void **)&official_rules);
+         }
+     }
+-
+     if ((return_value = attr_index_idlistsize_config(e, a, myreturntext))) {
++        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: %s: Failed to parse idscanlimit info: %d:%s\n",
++                              fname, return_value, myreturntext);
+         slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "%s: Failed to parse idscanlimit info: %d:%s\n",
+                       fname, return_value, myreturntext);
++        if (err_buf != NULL) {
++            /* we are inside of a callback, we shouldn't allow malformed attributes in index entries */
++            attrinfo_delete(&a);
++            return return_value;
++        }
+     }
+ 
+     /* initialize the IDL code's private data */
+     return_value = idl_init_private(be, a);
+     if (0 != return_value) {
+         /* fatal error, exit */
++        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: %s: line %d:Fatal Error: Failed to initialize attribute structure\n",
++                              fname, lineno);
+         slapi_log_err(SLAPI_LOG_CRIT, "attr_index_config",
+                       "%s: line %d:Fatal Error: Failed to initialize attribute structure\n",
+                       fname, lineno);
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+index 45f0034f0..720f93036 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+@@ -25,26 +25,34 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
+ #define INDEXTYPE_NONE 1
+ 
+ static int
+-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name)
++ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
+ {
+     Slapi_Attr *attr;
+     const struct berval *attrValue;
+     Slapi_Value *sval;
++    char *edn = slapi_entry_get_dn(e);
+ 
+     /* Get the name of the attribute to index which will be the value
+      * of the cn attribute. */
+     if (slapi_entry_attr_find(e, "cn", &attr) != 0) {
+-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", "Malformed index entry %s\n",
+-                      slapi_entry_get_dn(e));
++        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: malformed index entry %s\n",
++                              edn);
++        slapi_log_err(SLAPI_LOG_ERR,
++                      "ldbm_index_parse_entry", "Malformed index entry %s\n",
++                      edn);
+         return LDAP_OPERATIONS_ERROR;
+     }
+ 
+     slapi_attr_first_value(attr, &sval);
+     attrValue = slapi_value_get_berval(sval);
+     if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
++        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: malformed index entry %s -- empty index name\n",
++                              edn);
+         slapi_log_err(SLAPI_LOG_ERR,
+                       "ldbm_index_parse_entry", "Malformed index entry %s -- empty index name\n",
+-                      slapi_entry_get_dn(e));
++                      edn);
+         return LDAP_OPERATIONS_ERROR;
+     }
+ 
+@@ -59,16 +67,19 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
+         attrValue = slapi_value_get_berval(sval);
+         if (NULL == attrValue->bv_val || attrValue->bv_len == 0) {
+             /* missing the index type, error out */
+-            slapi_log_err(SLAPI_LOG_ERR,
+-                          "ldbm_index_parse_entry", "Malformed index entry %s -- empty nsIndexType\n",
+-                          slapi_entry_get_dn(e));
++            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
++                                  "Error: malformed index entry %s -- empty nsIndexType\n",
++                                  edn);
++            slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry",
++                          "Malformed index entry %s -- empty nsIndexType\n",
++                          edn);
+             slapi_ch_free_string(index_name);
+             return LDAP_OPERATIONS_ERROR;
+         }
+     }
+ 
+     /* ok the entry is good to process, pass it to attr_index_config */
+-    if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0)) {
++    if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
+         slapi_ch_free_string(index_name);
+         return LDAP_OPERATIONS_ERROR;
+     }
+@@ -92,7 +103,7 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
+     ldbm_instance *inst = (ldbm_instance *)arg;
+ 
+     returntext[0] = '\0';
+-    *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL);
++    *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
+     if (*returncode == LDAP_SUCCESS) {
+         return SLAPI_DSE_CALLBACK_OK;
+     } else {
+@@ -117,7 +128,7 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
+     char *index_name = NULL;
+ 
+     returntext[0] = '\0';
+-    *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
++    *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
+     if (*returncode == LDAP_SUCCESS) {
+         struct attrinfo *ai = NULL;
+         /* if the index is a "system" index, we assume it's being added by
+@@ -179,7 +190,7 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
+     slapi_attr_first_value(attr, &sval);
+     attrValue = slapi_value_get_berval(sval);
+ 
+-    attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE);
++    attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE, returntext);
+ 
+     ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
+     if (NULL == ainfo) {
+@@ -213,14 +224,19 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
+     Slapi_Value *sval;
+     const struct berval *attrValue;
+     struct attrinfo *ainfo = NULL;
++    char *edn = slapi_entry_get_dn(e);
++    char *edn_after = slapi_entry_get_dn(entryAfter);
+ 
+     returntext[0] = '\0';
+     *returncode = LDAP_SUCCESS;
+ 
+     if (slapi_entry_attr_find(entryAfter, "cn", &attr) != 0) {
++        slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: malformed index entry %s - missing cn attribute\n",
++                              edn_after);
+         slapi_log_err(SLAPI_LOG_ERR,
+                       "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute\n",
+-                      slapi_entry_get_dn(entryAfter));
++                      edn_after);
+         *returncode = LDAP_OBJECT_CLASS_VIOLATION;
+         return SLAPI_DSE_CALLBACK_ERROR;
+     }
+@@ -228,31 +244,40 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
+     attrValue = slapi_value_get_berval(sval);
+ 
+     if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
++        slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: malformed index entry %s - missing index name\n",
++                              edn);
+         slapi_log_err(SLAPI_LOG_ERR,
+                       "ldbm_instance_index_config_modify_callback", "Malformed index entry %s, missing index name\n",
+-                      slapi_entry_get_dn(e));
++                      edn);
+         *returncode = LDAP_UNWILLING_TO_PERFORM;
+         return SLAPI_DSE_CALLBACK_ERROR;
+     }
+ 
+     ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
+     if (NULL == ainfo) {
++        slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: malformed index entry %s - missing cn attribute info\n",
++                              edn);
+         slapi_log_err(SLAPI_LOG_ERR,
+                       "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute info\n",
+-                      slapi_entry_get_dn(e));
++                      edn);
+         *returncode = LDAP_UNWILLING_TO_PERFORM;
+         return SLAPI_DSE_CALLBACK_ERROR;
+     }
+ 
+     if (slapi_entry_attr_find(entryAfter, "nsIndexType", &attr) != 0) {
++        slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
++                              "Error: malformed index entry %s - missing nsIndexType attribute\n",
++                              edn_after);
+         slapi_log_err(SLAPI_LOG_ERR,
+                       "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing nsIndexType attribute\n",
+-                      slapi_entry_get_dn(entryAfter));
++                      edn_after);
+         *returncode = LDAP_OBJECT_CLASS_VIOLATION;
+         return SLAPI_DSE_CALLBACK_ERROR;
+     }
+ 
+-    if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0)) {
++    if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0, returntext)) {
+         *returncode = LDAP_UNWILLING_TO_PERFORM;
+         return SLAPI_DSE_CALLBACK_ERROR;
+     }
+@@ -364,7 +389,7 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
+         ainfo_get(inst->inst_be, index_name, &ai);
+     }
+     if (!ai) {
+-        rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
++        rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
+     }
+     if (rc == LDAP_SUCCESS) {
+         /* Assume the caller knows if it is OK to go online immediately */
+diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+index 9d82c8228..f2ef5ecd4 100644
+--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
++++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+@@ -291,7 +291,7 @@ db2index_add_indexed_attr(backend *be, char *attrString)
+         }
+     }
+ 
+-    attr_index_config(be, "from db2index()", 0, e, 0, 0);
++    attr_index_config(be, "from db2index()", 0, e, 0, 0, NULL);
+     slapi_entry_free(e);
+ 
+     return (0);
+diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+index 9a86c752b..a07acee5e 100644
+--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
++++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+@@ -24,7 +24,7 @@ void attrinfo_delete(struct attrinfo **pp);
+ void ainfo_get(backend *be, char *type, struct attrinfo **at);
+ void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
+ void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
+-int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none);
++int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none, char *err_buf);
+ int db2index_add_indexed_attr(backend *be, char *attrString);
+ int ldbm_compute_init(void);
+ void attrinfo_deletetree(ldbm_instance *inst);
+-- 
+2.26.2
+
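What makes 0014 above work is the extra err_buf argument threaded through attr_index_config() and ldbm_index_parse_entry(): when the DSE add/modify callbacks pass their returntext buffer, parse errors reach the client (the modify callback maps the failure to LDAP_UNWILLING_TO_PERFORM, which the updated test expects), while a NULL argument keeps the old log-only behaviour for internal callers. Here is a rough, self-contained C sketch of that optional-buffer reporting pattern; fprintf(stderr, ...), snprintf() and the RETURNTEXT_SIZE constant are stand-ins for slapi_log_err(), slapi_create_errormsg() and SLAPI_DSE_RETURNTEXT_SIZE, and parse_scanlimit() is a made-up placeholder, not the server's real parser.

/* Illustrative sketch (not from the patch): report a config-parsing error
 * both to the server log and, when the caller provided a buffer, to the
 * client-visible returntext.  Passing NULL keeps the old "log only"
 * behaviour, which is what the non-DSE callers in the patch still do. */
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define RETURNTEXT_SIZE 512 /* stand-in for SLAPI_DSE_RETURNTEXT_SIZE */

static void
report_index_error(char *err_buf, const char *fmt, ...)
{
    va_list ap;
    char msg[RETURNTEXT_SIZE];

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    /* always goes to the error log */
    fprintf(stderr, "attr_index_config - %s\n", msg);

    /* only reaches the client when a returntext buffer was handed in */
    if (err_buf != NULL) {
        snprintf(err_buf, RETURNTEXT_SIZE, "Error: %s", msg);
    }
}

static int
parse_scanlimit(const char *value, char *err_buf)
{
    /* deliberately simplistic check, just to exercise the error path */
    if (value == NULL || strncmp(value, "limit=", 6) != 0) {
        report_index_error(err_buf, "malformed nsIndexIDListScanLimit value \"%s\"",
                           value ? value : "(null)");
        return -1; /* a DSE callback would turn this into LDAP_UNWILLING_TO_PERFORM */
    }
    return 0;
}

int
main(void)
{
    char returntext[RETURNTEXT_SIZE] = "";

    parse_scanlimit("bogus", returntext); /* client-visible path */
    parse_scanlimit("bogus", NULL);       /* log-only path */
    printf("returntext: %s\n", returntext);
    return 0;
}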
diff --git a/SOURCES/0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch b/SOURCES/0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch
new file mode 100644
index 0000000..8da8d8f
--- /dev/null
+++ b/SOURCES/0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch
@@ -0,0 +1,213 @@
+From 3b3faee01e645577ad77ff4f38429a9e0806231b Mon Sep 17 00:00:00 2001
+From: Simon Pichugin <simon.pichugin@gmail.com>
+Date: Tue, 16 Jun 2020 20:35:05 +0200
+Subject: [PATCH] Issue 51157 - Reindex task may create abandoned index file
+
+Bug Description: Recreating an index for the same attribute while changing
+the case of, for example, one letter results in an abandoned index file.
+
+Fix Description: Add a test case to a newly created 'indexes' test suite.
+When we remove the index config from the backend, also remove the attribute
+info from the LDBM instance attributes.
+
+https://pagure.io/389-ds-base/issue/51157
+
+Reviewed by: firstyear, mreynolds (Thanks!)
+---
+ dirsrvtests/tests/suites/indexes/__init__.py  |   3 +
+ .../tests/suites/indexes/regression_test.py   | 125 ++++++++++++++++++
+ ldap/servers/slapd/back-ldbm/ldbm_attr.c      |   7 +
+ .../slapd/back-ldbm/ldbm_index_config.c       |   3 +
+ .../servers/slapd/back-ldbm/proto-back-ldbm.h |   1 +
+ 5 files changed, 139 insertions(+)
+ create mode 100644 dirsrvtests/tests/suites/indexes/__init__.py
+ create mode 100644 dirsrvtests/tests/suites/indexes/regression_test.py
+
+diff --git a/dirsrvtests/tests/suites/indexes/__init__.py b/dirsrvtests/tests/suites/indexes/__init__.py
+new file mode 100644
+index 000000000..04441667e
+--- /dev/null
++++ b/dirsrvtests/tests/suites/indexes/__init__.py
+@@ -0,0 +1,3 @@
++"""
++   :Requirement: 389-ds-base: Indexes
++"""
+diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
+new file mode 100644
+index 000000000..1a71f16e9
+--- /dev/null
++++ b/dirsrvtests/tests/suites/indexes/regression_test.py
+@@ -0,0 +1,125 @@
++# --- BEGIN COPYRIGHT BLOCK ---
++# Copyright (C) 2020 Red Hat, Inc.
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
++# --- END COPYRIGHT BLOCK ---
++#
++import time
++import os
++import pytest
++import ldap
++from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
++from lib389.index import Indexes
++from lib389.backend import Backends
++from lib389.idm.user import UserAccounts
++from lib389.topologies import topology_st as topo
++
++pytestmark = pytest.mark.tier1
++
++
++def test_reindex_task_creates_abandoned_index_file(topo):
++    """
++    Recreating an index for the same attribute but changing
++    the case of for example 1 letter, results in abandoned indexfile
++
++    :id: 07ae5274-481a-4fa8-8074-e0de50d89ac6
++    :setup: Standalone instance
++    :steps:
++        1. Create a user object with additional attributes:
++           objectClass: mozillaabpersonalpha
++           mozillaCustom1: xyz
++        2. Add an index entry mozillacustom1
++        3. Reindex the backend
++        4. Check the content of the index (after it has been flushed to disk) mozillacustom1.db
++        5. Remove the index
++        6. Notice the mozillacustom1.db is removed
++        7. Recreate the index but now use the exact case as mentioned in the schema
++        8. Reindex the backend
++        9. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
++        10. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
++        11. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
++        12. Restart the instance
++        13. Notice that an ldapsearch does not return a result(mozillacustom1=xyz)
++        15. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
++        16. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
++        17. Reindex the backend
++        18. Notice the second indexfile for this attribute
++        19. Check the content of the index (after it has been flushed to disk) no mozillacustom1.db
++        20. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
++    :expectedresults:
++        1. Should Success.
++        2. Should Success.
++        3. Should Success.
++        4. Should Success.
++        5. Should Success.
++        6. Should Success.
++        7. Should Success.
++        8. Should Success.
++        9. Should Success.
++        10. Should Success.
++        11. Should Success.
++        12. Should Success.
++        13. Should Success.
++        14. Should Success.
++        15. Should Success.
++        16. Should Success.
++        17. Should Success.
++        18. Should Success.
++        19. Should Success.
++        20. Should Success.
++    """
++
++    inst = topo.standalone
++    attr_name = "mozillaCustom1"
++    attr_value = "xyz"
++
++    users = UserAccounts(inst, DEFAULT_SUFFIX)
++    user = users.create_test_user()
++    user.add("objectClass", "mozillaabpersonalpha")
++    user.add(attr_name, attr_value)
++
++    backends = Backends(inst)
++    backend = backends.get(DEFAULT_BENAME)
++    indexes = backend.get_indexes()
++    index = indexes.create(properties={
++        'cn': attr_name.lower(),
++        'nsSystemIndex': 'false',
++        'nsIndexType': ['eq', 'pres']
++        })
++
++    backend.reindex()
++    time.sleep(3)
++    assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
++    index.delete()
++    assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
++
++    index = indexes.create(properties={
++        'cn': attr_name,
++        'nsSystemIndex': 'false',
++        'nsIndexType': ['eq', 'pres']
++        })
++
++    backend.reindex()
++    time.sleep(3)
++    assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
++    assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
++
++    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
++    assert len(entries) > 0
++    inst.restart()
++    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
++    assert len(entries) > 0
++
++    backend.reindex()
++    time.sleep(3)
++    assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
++    assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
++
++
++if __name__ == "__main__":
++    # Run isolated
++    # -s for DEBUG mode
++    CURRENT_FILE = os.path.realpath(__file__)
++    pytest.main("-s %s" % CURRENT_FILE)
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+index f0d418572..688c4f137 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+@@ -98,6 +98,13 @@ ainfo_cmp(
+     return (strcasecmp(a->ai_type, b->ai_type));
+ }
+ 
++void
++attrinfo_delete_from_tree(backend *be, struct attrinfo *ai)
++{
++    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
++    avl_delete(&inst->inst_attrs, ai, ainfo_cmp);
++}
++
+ /*
+  * Called when a duplicate "index" line is encountered.
+  *
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+index 720f93036..9722d0ce7 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+@@ -201,7 +201,10 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
+             *returncode = LDAP_UNWILLING_TO_PERFORM;
+             rc = SLAPI_DSE_CALLBACK_ERROR;
+         }
++        attrinfo_delete_from_tree(inst->inst_be, ainfo);
+     }
++    /* Free attrinfo structure */
++    attrinfo_delete(&ainfo);
+ bail:
+     return rc;
+ }
+diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+index a07acee5e..4d2524fd9 100644
+--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
++++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+@@ -21,6 +21,7 @@
+  */
+ struct attrinfo *attrinfo_new(void);
+ void attrinfo_delete(struct attrinfo **pp);
++void attrinfo_delete_from_tree(backend *be, struct attrinfo *ai);
+ void ainfo_get(backend *be, char *type, struct attrinfo **at);
+ void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
+ void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
+-- 
+2.26.2
+
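The decisive part of 0015 above is the ordering in the delete callback: the attrinfo node is first removed from the instance's attribute tree (the new attrinfo_delete_from_tree(), an avl_delete() wrapper) and only then freed, so recreating the index under a differently-cased cn builds a fresh entry instead of reusing the stale one that still points at the abandoned .db file. The following self-contained C sketch mimics that unlink-then-free ordering with a case-insensitive linked list standing in for the AVL tree; every name in it (registry_add, registry_delete, and so on) is invented for illustration.

/* Illustrative sketch (not from the patch): an index-info registry keyed
 * case-insensitively by attribute type.  Deleting an index must unlink the
 * node from the registry *and* free it; leaving it in place is what let a
 * re-created "mozillaCustom1" index silently reuse the old "mozillacustom1"
 * entry and its abandoned index file. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h> /* strcasecmp */

struct attrinfo {
    char *ai_type;           /* attribute name as configured */
    struct attrinfo *next;
};

static struct attrinfo *registry; /* stand-in for inst->inst_attrs */

static void
registry_add(const char *type)
{
    struct attrinfo *ai = calloc(1, sizeof(*ai));
    ai->ai_type = strdup(type);
    ai->next = registry;
    registry = ai;
}

static struct attrinfo *
registry_get(const char *type)
{
    for (struct attrinfo *ai = registry; ai != NULL; ai = ai->next) {
        if (strcasecmp(ai->ai_type, type) == 0) {
            return ai;
        }
    }
    return NULL;
}

static void
registry_delete(const char *type)
{
    struct attrinfo **pp = &registry;
    while (*pp != NULL) {
        if (strcasecmp((*pp)->ai_type, type) == 0) {
            struct attrinfo *ai = *pp;
            *pp = ai->next;   /* unlink from the tree/list first ... */
            free(ai->ai_type);
            free(ai);         /* ... then free, mirroring the patch */
            return;
        }
        pp = &(*pp)->next;
    }
}

int
main(void)
{
    registry_add("mozillacustom1");    /* index created with lower-case cn */
    registry_delete("mozillacustom1"); /* index config entry deleted */
    registry_add("mozillaCustom1");    /* re-created with the schema casing */

    struct attrinfo *ai = registry_get("mozillaCustom1");
    printf("active entry: %s\n", ai ? ai->ai_type : "(none)");
    return 0;
}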
diff --git a/SOURCES/0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch b/SOURCES/0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch
new file mode 100644
index 0000000..10c002c
--- /dev/null
+++ b/SOURCES/0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch
@@ -0,0 +1,668 @@
+From 282edde7950ceb2515d74fdbcc0a188131769d74 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Tue, 23 Jun 2020 16:38:55 -0400
+Subject: [PATCH] Issue 51165 - add new access log keywords for wtime and
+ optime
+
+Description:  In addition to the "etime" stat in the access log we can also
+              add the time the operation spent in the work queue, and
+              how long the actual operation took.  We now have "wtime"
+              and "optime" to track these stats in the access log.
+
+              Also updated logconv.pl for notes=F (related to a different
+              ticket), and added stats for wtime and optime.
+
+relates: https://pagure.io/389-ds-base/issue/51165
+
+Reviewed by: ?
+---
+ ldap/admin/src/logconv.pl         | 187 +++++++++++++++++++++++++++---
+ ldap/servers/slapd/add.c          |   3 +
+ ldap/servers/slapd/bind.c         |   4 +
+ ldap/servers/slapd/delete.c       |   3 +
+ ldap/servers/slapd/modify.c       |   3 +
+ ldap/servers/slapd/modrdn.c       |   3 +
+ ldap/servers/slapd/operation.c    |  24 ++++
+ ldap/servers/slapd/opshared.c     |   3 +
+ ldap/servers/slapd/result.c       |  49 ++++----
+ ldap/servers/slapd/slap.h         |  13 ++-
+ ldap/servers/slapd/slapi-plugin.h |  26 ++++-
+ 11 files changed, 269 insertions(+), 49 deletions(-)
+
+diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
+index f4808a101..1ed44a888 100755
+--- a/ldap/admin/src/logconv.pl
++++ b/ldap/admin/src/logconv.pl
+@@ -3,7 +3,7 @@
+ #
+ # BEGIN COPYRIGHT BLOCK
+ # Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
+-# Copyright (C) 2013 Red Hat, Inc.
++# Copyright (C) 2020 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -55,7 +55,7 @@ my $reportStats = "";
+ my $dataLocation = "/tmp";
+ my $startTLSoid = "1.3.6.1.4.1.1466.20037";
+ my @statnames=qw(last last_str results srch add mod modrdn moddn cmp del abandon
+-                 conns sslconns bind anonbind unbind notesA notesU etime);
++                 conns sslconns bind anonbind unbind notesA notesU notesF etime);
+ my $s_stats;
+ my $m_stats;
+ my $verb = "no";
+@@ -211,6 +211,7 @@ my $sslClientBindCount = 0;
+ my $sslClientFailedCount = 0;
+ my $objectclassTopCount= 0;
+ my $pagedSearchCount = 0;
++my $invalidFilterCount = 0;
+ my $bindCount = 0;
+ my $filterCount = 0;
+ my $baseCount = 0;
+@@ -258,7 +259,7 @@ map {$conn{$_} = $_} @conncodes;
+ # hash db-backed hashes
+ my @hashnames = qw(attr rc src rsrc excount conn_hash ip_hash conncount nentries
+                    filter base ds6xbadpwd saslmech saslconnop bindlist etime oid
+-                   start_time_of_connection end_time_of_connection
++                   start_time_of_connection end_time_of_connection notesf_conn_op
+                    notesa_conn_op notesu_conn_op etime_conn_op nentries_conn_op
+                    optype_conn_op time_conn_op srch_conn_op del_conn_op mod_conn_op
+                    mdn_conn_op cmp_conn_op bind_conn_op unbind_conn_op ext_conn_op
+@@ -926,7 +927,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
+ 			}
+ 			while($op > 0){
+ 				# The bind op is not the same as the search op that triggered the notes=A.
+-				# We have adjust the key by decrementing the op count until we find the last bind op.
++				# We have to adjust the key by decrementing the op count until we find the last bind op.
+ 				$op--;
+ 				$binddn_key = "$srvRstCnt,$conn,$op";
+ 				if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
+@@ -1049,9 +1050,60 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
+ 			}
+ 		}
+ 	}
+-} # end of unindexed search report
++    print "\n";
++}
++
++print "Invalid Attribute Filters:    $invalidFilterCount\n";
++if ($invalidFilterCount > 0 && $verb eq "yes"){
++    my $conn_hash = $hashes->{conn_hash};
++    my $notesf_conn_op = $hashes->{notesf_conn_op};
++    my $time_conn_op = $hashes->{time_conn_op};
++    my $etime_conn_op = $hashes->{etime_conn_op};
++    my $nentries_conn_op = $hashes->{nentries_conn_op};
++    my $filter_conn_op = $hashes->{filter_conn_op};
++    my $bind_conn_op = $hashes->{bind_conn_op};
++    my $notesCount = 1;
++    my $unindexedIp;
++    my $binddn_key;
++    my %uniqFilt = (); # hash of unique filters
++    my %uniqFilter = (); # hash of unique filters bind dn
++    my %uniqBindDNs = (); # hash of unique bind dn's
++    my %uniqBindFilters = (); # hash of filters for a bind DN
++
++    while (my ($srcnt_conn_op, $count) = each %{$notesf_conn_op}) {
++        my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
++        my $attrIp = getIPfromConn($conn, $srvRstCnt);
++        print "\n  Invalid Attribute Filter #".$notesCount." (notes=F)\n";
++        print "  -  Date/Time:             $time_conn_op->{$srcnt_conn_op}\n";
++        print "  -  Connection Number:     $conn\n";
++        print "  -  Operation Number:      $op\n";
++        print "  -  Etime:                 $etime_conn_op->{$srcnt_conn_op}\n";
++        print "  -  Nentries:              $nentries_conn_op->{$srcnt_conn_op}\n";
++        print "  -  IP Address:            $attrIp\n";
++        if (exists($filter_conn_op->{$srcnt_conn_op}) && defined($filter_conn_op->{$srcnt_conn_op})) {
++            print "  -  Search Filter:         $filter_conn_op->{$srcnt_conn_op}\n";
++            $uniqFilt{$filter_conn_op->{$srcnt_conn_op}}++;
++        }
++        while($op > 0){
++            # The bind op is not the same as the search op that triggered the notes=A.
++            # We have to adjust the key by decrementing the op count until we find the last bind op.
++            $op--;
++            $binddn_key = "$srvRstCnt,$conn,$op";
++            if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
++                print "  -  Bind DN:               $bind_conn_op->{$binddn_key}\n";
++                $uniqBindDNs{$bind_conn_op->{$binddn_key}}++;
++                if( $uniqFilt{$filter_conn_op->{$srcnt_conn_op}} && defined($filter_conn_op->{$srcnt_conn_op})) {
++                    $uniqBindFilters{$bind_conn_op->{$binddn_key}}{$filter_conn_op->{$srcnt_conn_op}}++;
++                    $uniqFilter{$filter_conn_op->{$srcnt_conn_op}}{$bind_conn_op->{$binddn_key}}++;
++                }
++                last;
++            }
++        }
++        $notesCount++;
++    }
++    print "\n";
++}
+ 
+-print "\n";
+ print "FDs Taken:                    $fdTaken\n";
+ print "FDs Returned:                 $fdReturned\n";
+ print "Highest FD Taken:             $highestFdTaken\n\n";
+@@ -1386,20 +1438,20 @@ if ($usage =~ /l/ || $verb eq "yes"){
+ 	}
+ }
+ 
+-#########################################
+-#                                       #
+-# Gather and Process the unique etimes  #
+-#                                       #
+-#########################################
++##############################################################
++#                                                            #
++# Gather and Process the unique etimes, wtimes, and optimes  #
++#                                                            #
++##############################################################
+ 
+ my $first;
+ if ($usage =~ /t/i || $verb eq "yes"){
++	# Print the elapsed times (etime)
++
+ 	my $etime = $hashes->{etime};
+ 	my @ekeys = keys %{$etime};
+-	#
+ 	# print most often etimes
+-	#
+-	print "\n\n----- Top $sizeCount Most Frequent etimes -----\n\n";
++	print "\n\n----- Top $sizeCount Most Frequent etimes (elapsed times) -----\n\n";
+ 	my $eloop = 0;
+ 	my $retime = 0;
+ 	foreach my $et (sort { $etime->{$b} <=> $etime->{$a} } @ekeys) {
+@@ -1411,16 +1463,84 @@ if ($usage =~ /t/i || $verb eq "yes"){
+ 		printf "%-8s        %-12s\n", $etime->{ $et }, "etime=$et";
+ 		$eloop++;
+ 	}
+-	#
++	if ($eloop == 0) {
++		print "None";
++	}
+ 	# print longest etimes
+-	#
+-	print "\n\n----- Top $sizeCount Longest etimes -----\n\n";
++	print "\n\n----- Top $sizeCount Longest etimes (elapsed times) -----\n\n";
+ 	$eloop = 0;
+ 	foreach my $et (sort { $b <=> $a } @ekeys) {
+ 		if ($eloop == $sizeCount) { last; }
+ 		printf "%-12s    %-10s\n","etime=$et",$etime->{ $et };
+ 		$eloop++;
+ 	}
++	if ($eloop == 0) {
++		print "None";
++	}
++
++	# Print the wait times (wtime)
++
++	my $wtime = $hashes->{wtime};
++	my @wkeys = keys %{$wtime};
++	# print most often wtimes
++	print "\n\n----- Top $sizeCount Most Frequent wtimes (wait times) -----\n\n";
++	$eloop = 0;
++	$retime = 0;
++	foreach my $et (sort { $wtime->{$b} <=> $wtime->{$a} } @wkeys) {
++		if ($eloop == $sizeCount) { last; }
++		if ($retime ne "2"){
++			$first = $et;
++			$retime = "2";
++		}
++		printf "%-8s        %-12s\n", $wtime->{ $et }, "wtime=$et";
++		$eloop++;
++	}
++	if ($eloop == 0) {
++		print "None";
++	}
++	# print longest wtimes
++	print "\n\n----- Top $sizeCount Longest wtimes (wait times) -----\n\n";
++	$eloop = 0;
++	foreach my $et (sort { $b <=> $a } @wkeys) {
++		if ($eloop == $sizeCount) { last; }
++		printf "%-12s    %-10s\n","wtime=$et",$wtime->{ $et };
++		$eloop++;
++	}
++	if ($eloop == 0) {
++		print "None";
++	}
++
++	# Print the operation times (optime)
++
++	my $optime = $hashes->{optime};
++	my @opkeys = keys %{$optime};
++	# print most often optimes
++	print "\n\n----- Top $sizeCount Most Frequent optimes (actual operation times) -----\n\n";
++	$eloop = 0;
++	$retime = 0;
++	foreach my $et (sort { $optime->{$b} <=> $optime->{$a} } @opkeys) {
++		if ($eloop == $sizeCount) { last; }
++		if ($retime ne "2"){
++			$first = $et;
++			$retime = "2";
++		}
++		printf "%-8s        %-12s\n", $optime->{ $et }, "optime=$et";
++		$eloop++;
++	}
++	if ($eloop == 0) {
++		print "None";
++	}
++	# print longest optimes
++	print "\n\n----- Top $sizeCount Longest optimes (actual operation times) -----\n\n";
++	$eloop = 0;
++	foreach my $et (sort { $b <=> $a } @opkeys) {
++		if ($eloop == $sizeCount) { last; }
++		printf "%-12s    %-10s\n","optime=$et",$optime->{ $et };
++		$eloop++;
++	}
++	if ($eloop == 0) {
++		print "None";
++	}
+ }
+ 
+ #######################################
+@@ -2152,6 +2272,26 @@ sub parseLineNormal
+ 	if (m/ RESULT err=/ && m/ notes=[A-Z,]*P/){
+ 		$pagedSearchCount++;
+ 	}
++	if (m/ RESULT err=/ && m/ notes=[A-Z,]*F/){
++		$invalidFilterCount++;
++		$con = "";
++		if ($_ =~ /conn= *([0-9A-Z]+)/i){
++			$con = $1;
++			if ($_ =~ /op= *([0-9\-]+)/i){ $op = $1;}
++		}
++
++		if($reportStats){ inc_stats('notesF',$s_stats,$m_stats); }
++        if ($usage =~ /u/ || $usage =~ /U/ || $verb eq "yes"){
++            if($_ =~ /etime= *([0-9.]+)/i ){
++                if($1 >= $minEtime){
++                    $hashes->{etime_conn_op}->{"$serverRestartCount,$con,$op"} = $1;
++                    $hashes->{notesf_conn_op}->{"$serverRestartCount,$con,$op"}++;
++                    if ($_ =~ / *([0-9a-z:\/]+)/i){ $hashes->{time_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
++                    if ($_ =~ /nentries= *([0-9]+)/i ){ $hashes->{nentries_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
++                }
++            }
++        }
++	}
+ 	if (m/ notes=[A-Z,]*A/){
+ 		$con = "";
+ 		if ($_ =~ /conn= *([0-9A-Z]+)/i){
+@@ -2435,6 +2575,16 @@ sub parseLineNormal
+ 		if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{etime}->{$etime_val}++; }
+ 		if ($reportStats){ inc_stats_val('etime',$etime_val,$s_stats,$m_stats); }
+ 	}
++	if ($_ =~ /wtime= *([0-9.]+)/ ) {
++		my $wtime_val = $1;
++		if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{wtime}->{$wtime_val}++; }
++		if ($reportStats){ inc_stats_val('wtime',$wtime_val,$s_stats,$m_stats); }
++	}
++	if ($_ =~ /optime= *([0-9.]+)/ ) {
++		my $optime_val = $1;
++		if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{optime}->{$optime_val}++; }
++		if ($reportStats){ inc_stats_val('optime',$optime_val,$s_stats,$m_stats); }
++	}
+ 	if ($_ =~ / tag=101 / || $_ =~ / tag=111 / || $_ =~ / tag=100 / || $_ =~ / tag=115 /){
+ 		if ($_ =~ / nentries= *([0-9]+)/i ){ 
+ 			my $nents = $1;
+@@ -2555,7 +2705,7 @@ sub parseLineNormal
+ 			}
+ 		}
+ 	}
+-	if (/ RESULT err=/ && / tag=97 nentries=0 etime=/ && $_ =~ /dn=\"(.*)\"/i){
++	if (/ RESULT err=/ && / tag=97 nentries=0 / && $_ =~ /dn=\"(.*)\"/i){
+ 		# Check if this is a sasl bind, if see we need to add the RESULT's dn as a bind dn
+ 		my $binddn = $1;
+ 		my ($conn, $op);
+@@ -2680,6 +2830,7 @@ print_stats_block
+ 						 $stats->{'unbind'},
+ 						 $stats->{'notesA'},
+ 						 $stats->{'notesU'},
++						 $stats->{'notesF'},
+ 						 $stats->{'etime'}),
+ 					"\n" );
+ 			} else {
+diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
+index 06ca1ee79..52c64fa3c 100644
+--- a/ldap/servers/slapd/add.c
++++ b/ldap/servers/slapd/add.c
+@@ -441,6 +441,9 @@ op_shared_add(Slapi_PBlock *pb)
+     internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
+     pwpolicy = new_passwdPolicy(pb, slapi_entry_get_dn(e));
+ 
++    /* Set the time we actually started the operation */
++    slapi_operation_set_time_started(operation);
++
+     /* target spec is used to decide which plugins are applicable for the operation */
+     operation_set_target_spec(operation, slapi_entry_get_sdn(e));
+ 
+diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
+index 310216e89..55f865077 100644
+--- a/ldap/servers/slapd/bind.c
++++ b/ldap/servers/slapd/bind.c
+@@ -87,6 +87,10 @@ do_bind(Slapi_PBlock *pb)
+         send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, NULL, 0, NULL);
+         goto free_and_return;
+     }
++
++    /* Set the time we actually started the operation */
++    slapi_operation_set_time_started(pb_op);
++
+     ber = pb_op->o_ber;
+ 
+     /*
+diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c
+index c0e61adf1..1a7209317 100644
+--- a/ldap/servers/slapd/delete.c
++++ b/ldap/servers/slapd/delete.c
+@@ -236,6 +236,9 @@ op_shared_delete(Slapi_PBlock *pb)
+     slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+     internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
+ 
++    /* Set the time we actually started the operation */
++    slapi_operation_set_time_started(operation);
++
+     sdn = slapi_sdn_new_dn_byval(rawdn);
+     dn = slapi_sdn_get_dn(sdn);
+     slapi_pblock_set(pb, SLAPI_DELETE_TARGET_SDN, (void *)sdn);
+diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
+index 259bedfff..a186dbde3 100644
+--- a/ldap/servers/slapd/modify.c
++++ b/ldap/servers/slapd/modify.c
+@@ -626,6 +626,9 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
+     slapi_pblock_get(pb, SLAPI_SKIP_MODIFIED_ATTRS, &skip_modified_attrs);
+     slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ 
++    /* Set the time we actually started the operation */
++    slapi_operation_set_time_started(operation);
++
+     if (sdn) {
+         passin_sdn = 1;
+     } else {
+diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c
+index 3efe584a7..e04916b83 100644
+--- a/ldap/servers/slapd/modrdn.c
++++ b/ldap/servers/slapd/modrdn.c
+@@ -417,6 +417,9 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args)
+     internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
+     slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ 
++    /* Set the time we actually started the operation */
++    slapi_operation_set_time_started(operation);
++
+     /*
+      * If ownership has not been passed to this function, we replace the
+      * string input fields within the pblock with strdup'd copies.  Why?
+diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
+index ff16cd906..4dd3481c7 100644
+--- a/ldap/servers/slapd/operation.c
++++ b/ldap/servers/slapd/operation.c
+@@ -651,3 +651,27 @@ slapi_operation_time_expiry(Slapi_Operation *o, time_t timeout, struct timespec
+ {
+     slapi_timespec_expire_rel(timeout, &(o->o_hr_time_rel), expiry);
+ }
++
++/* Set the time the operation actually started */
++void
++slapi_operation_set_time_started(Slapi_Operation *o)
++{
++	clock_gettime(CLOCK_MONOTONIC, &(o->o_hr_time_started_rel));
++}
++
++/* The time diff of how long the operation took once it actually started */
++void
++slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
++{
++    struct timespec o_hr_time_now;
++    clock_gettime(CLOCK_MONOTONIC, &o_hr_time_now);
++
++    slapi_timespec_diff(&o_hr_time_now, &(o->o_hr_time_started_rel), elapsed);
++}
++
++/* The time diff the operation waited in the work queue */
++void
++slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
++{
++    slapi_timespec_diff(&(o->o_hr_time_started_rel), &(o->o_hr_time_rel), elapsed);
++}
+diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
+index 9fe78655c..c0bc5dcd0 100644
+--- a/ldap/servers/slapd/opshared.c
++++ b/ldap/servers/slapd/opshared.c
+@@ -284,6 +284,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
+     slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
+     slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+ 
++    /* Set the time we actually started the operation */
++    slapi_operation_set_time_started(operation);
++
+     if (NULL == sdn) {
+         sdn = slapi_sdn_new_dn_byval(base);
+         slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, sdn);
+diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
+index 0b13c30e9..61efb6f8d 100644
+--- a/ldap/servers/slapd/result.c
++++ b/ldap/servers/slapd/result.c
+@@ -1975,6 +1975,8 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+     CSN *operationcsn = NULL;
+     char csn_str[CSN_STRSIZE + 5];
+     char etime[ETIME_BUFSIZ] = {0};
++    char wtime[ETIME_BUFSIZ] = {0};
++    char optime[ETIME_BUFSIZ] = {0};
+     int pr_idx = -1;
+     int pr_cookie = -1;
+     uint32_t operation_notes;
+@@ -1982,19 +1984,26 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+     int32_t op_id;
+     int32_t op_internal_id;
+     int32_t op_nested_count;
++    struct timespec o_hr_time_end;
+ 
+     get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count);
+-
+     slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_INDEX, &pr_idx);
+     slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_COOKIE, &pr_cookie);
+-
+     internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL);
+ 
+-    struct timespec o_hr_time_end;
++    /* total elapsed time */
+     slapi_operation_time_elapsed(op, &o_hr_time_end);
++    snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
++
++    /* wait time */
++    slapi_operation_workq_time_elapsed(op, &o_hr_time_end);
++    snprintf(wtime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
++
++    /* op time */
++    slapi_operation_op_time_elapsed(op, &o_hr_time_end);
++    snprintf(optime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+ 
+ 
+-    snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+ 
+     operation_notes = slapi_pblock_get_operation_notes(pb);
+ 
+@@ -2025,16 +2034,16 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+         if (!internal_op) {
+             slapi_log_access(LDAP_DEBUG_STATS,
+                              "conn=%" PRIu64 " op=%d RESULT err=%d"
+-                             " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
++                             " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
+                              ", SASL bind in progress\n",
+                              op->o_connid,
+                              op->o_opid,
+                              err, tag, nentries,
+-                             etime,
++                             wtime, optime, etime,
+                              notes_str, csn_str);
+         } else {
+ 
+-#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s, SASL bind in progress\n"
++#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s, SASL bind in progress\n"
+             slapi_log_access(LDAP_DEBUG_ARGS,
+                              connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_SASLMSG_FMT :
+                                            LOG_CONN_OP_FMT_EXT_INT LOG_SASLMSG_FMT,
+@@ -2043,7 +2052,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+                              op_internal_id,
+                              op_nested_count,
+                              err, tag, nentries,
+-                             etime,
++                             wtime, optime, etime,
+                              notes_str, csn_str);
+         }
+     } else if (op->o_tag == LDAP_REQ_BIND && err == LDAP_SUCCESS) {
+@@ -2057,15 +2066,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+         if (!internal_op) {
+             slapi_log_access(LDAP_DEBUG_STATS,
+                              "conn=%" PRIu64 " op=%d RESULT err=%d"
+-                             " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
++                             " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
+                              " dn=\"%s\"\n",
+                              op->o_connid,
+                              op->o_opid,
+                              err, tag, nentries,
+-                             etime,
++                             wtime, optime, etime,
+                              notes_str, csn_str, dn ? dn : "");
+         } else {
+-#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s dn=\"%s\"\n"
++#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s dn=\"%s\"\n"
+             slapi_log_access(LDAP_DEBUG_ARGS,
+                              connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_BINDMSG_FMT :
+                                            LOG_CONN_OP_FMT_EXT_INT LOG_BINDMSG_FMT,
+@@ -2074,7 +2083,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+                              op_internal_id,
+                              op_nested_count,
+                              err, tag, nentries,
+-                             etime,
++                             wtime, optime, etime,
+                              notes_str, csn_str, dn ? dn : "");
+         }
+         slapi_ch_free((void **)&dn);
+@@ -2083,15 +2092,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+             if (!internal_op) {
+                 slapi_log_access(LDAP_DEBUG_STATS,
+                                  "conn=%" PRIu64 " op=%d RESULT err=%d"
+-                                 " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
++                                 " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
+                                  " pr_idx=%d pr_cookie=%d\n",
+                                  op->o_connid,
+                                  op->o_opid,
+                                  err, tag, nentries,
+-                                 etime,
++                                 wtime, optime, etime,
+                                  notes_str, csn_str, pr_idx, pr_cookie);
+             } else {
+-#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
++#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
+                 slapi_log_access(LDAP_DEBUG_ARGS,
+                                  connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_PRMSG_FMT :
+                                                LOG_CONN_OP_FMT_EXT_INT LOG_PRMSG_FMT,
+@@ -2100,7 +2109,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+                                  op_internal_id,
+                                  op_nested_count,
+                                  err, tag, nentries,
+-                                 etime,
++                                 wtime, optime, etime,
+                                  notes_str, csn_str, pr_idx, pr_cookie);
+             }
+         } else if (!internal_op) {
+@@ -2114,11 +2123,11 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+             }
+             slapi_log_access(LDAP_DEBUG_STATS,
+                              "conn=%" PRIu64 " op=%d RESULT err=%d"
+-                             " tag=%" BERTAG_T " nentries=%d etime=%s%s%s%s\n",
++                             " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n",
+                              op->o_connid,
+                              op->o_opid,
+                              err, tag, nentries,
+-                             etime,
++                             wtime, optime, etime,
+                              notes_str, csn_str, ext_str);
+             if (pbtxt) {
+                 /* if !pbtxt ==> ext_str == "".  Don't free ext_str. */
+@@ -2126,7 +2135,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+             }
+         } else {
+             int optype;
+-#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s\n"
++#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n"
+             slapi_log_access(LDAP_DEBUG_ARGS,
+                              connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT :
+                                            LOG_CONN_OP_FMT_EXT_INT LOG_MSG_FMT,
+@@ -2135,7 +2144,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
+                              op_internal_id,
+                              op_nested_count,
+                              err, tag, nentries,
+-                             etime,
++                             wtime, optime, etime,
+                              notes_str, csn_str);
+             /*
+              *  If this is an unindexed search we should log it in the error log if
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index cef8c789c..8e76393c3 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -1538,16 +1538,17 @@ typedef struct slapi_operation_results
+  */
+ typedef struct op
+ {
+-    BerElement *o_ber;             /* ber of the request          */
+-    ber_int_t o_msgid;             /* msgid of the request          */
+-    ber_tag_t o_tag;               /* tag of the request          */
++    BerElement *o_ber;             /* ber of the request */
++    ber_int_t o_msgid;             /* msgid of the request */
++    ber_tag_t o_tag;               /* tag of the request */
+     struct timespec o_hr_time_rel; /* internal system time op initiated */
+     struct timespec o_hr_time_utc; /* utc system time op initiated */
+-    int o_isroot;                  /* requestor is manager          */
++    struct timespec o_hr_time_started_rel; /* internal system time op started */
++    int o_isroot;                  /* requestor is manager */
+     Slapi_DN o_sdn;                /* dn bound when op was initiated */
+-    char *o_authtype;              /* auth method used to bind dn      */
++    char *o_authtype;              /* auth method used to bind dn */
+     int o_ssf;                     /* ssf for this operation (highest between SASL and TLS/SSL) */
+-    int o_opid;                    /* id of this operation          */
++    int o_opid;                    /* id of this operation */
+     PRUint64 o_connid;             /* id of conn initiating this op; for logging only */
+     void *o_handler_data;
+     result_handler o_result_handler;
+diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
+index 834a98742..8d9c3fa6a 100644
+--- a/ldap/servers/slapd/slapi-plugin.h
++++ b/ldap/servers/slapd/slapi-plugin.h
+@@ -8210,13 +8210,29 @@ void slapi_operation_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
+  */
+ void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiated);
+ /**
+- * Given an operation and a timeout, return a populate struct with the expiry
+- * time of the operation suitable for checking with slapi_timespec_expire_check
++ * Given an operation, determine the time elapsed since the op
++ * was actually started.
+  *
+- * \param Slapi_Operation o - the operation that is in progress
+- * \param time_t timeout the seconds relative to operation initiation to expiry at.
+- * \param struct timespec *expiry the timespec to popluate with the relative expiry.
++ * \param Slapi_Operation o - the operation which is in progress
++ * \param struct timespec *elapsed - location where the time difference will be
++ * placed.
++ */
++void slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
++/**
++ * Given an operation, determine the time elapsed that the op spent
++ * in the work queue before actually being dispatched to a worker thread
++ *
++ * \param Slapi_Operation o - the operation which is in progress
++ * \param struct timespec *elapsed - location where the time difference will be
++ * placed.
++ */
++void slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
++/**
++ * Set the time the operation actually started
++ *
++ * \param Slapi_Operation o - the operation which is in progress
+  */
++void slapi_operation_set_time_started(Slapi_Operation *o);
+ #endif
+ 
+ /**
+-- 
+2.26.2
+
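
The hunks above extend every RESULT access-log line with wtime (time the operation sat
in the work queue) and optime (time spent actually executing it) next to the existing
etime. A minimal parsing sketch, assuming a RESULT line shaped like the patched format
strings; the sample line and the parse_timings() helper are illustrative only and not
part of the patch:

    import re

    # Hypothetical RESULT line following the patched format strings.
    SAMPLE = ('conn=12 op=3 RESULT err=0 tag=101 nentries=1 '
              'wtime=0.000045000 optime=0.000134000 etime=0.000180231')

    # The three timings are printed as seconds.nanoseconds ("%ld.%.09ld").
    PATTERN = re.compile(r'wtime=(?P<wtime>\S+) optime=(?P<optime>\S+) etime=(?P<etime>\S+)')

    def parse_timings(line):
        m = PATTERN.search(line)
        if m is None:
            return None          # pre-upgrade line without the new keywords
        return {name: float(value) for name, value in m.groupdict().items()}

    if __name__ == '__main__':
        print(parse_timings(SAMPLE))
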
diff --git a/SOURCES/0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch b/SOURCES/0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch
new file mode 100644
index 0000000..56b0db4
--- /dev/null
+++ b/SOURCES/0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch
@@ -0,0 +1,31 @@
+From ec1714c81290a03ae9aa5fd10acf3e9be71596d7 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Thu, 11 Jun 2020 15:47:43 -0400
+Subject: [PATCH] Issue 50912 - pwdReset can be modified by a user
+
+Description:  The attribute "pwdReset" should only be allowed to be set by the
+              server.  Update schema definition to include NO-USER-MODIFICATION
+
+relates: https://pagure.io/389-ds-base/issue/50912
+
+Reviewed by: mreynolds(one line commit rule)
+---
+ ldap/schema/02common.ldif | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
+index 966636bef..c6dc074db 100644
+--- a/ldap/schema/02common.ldif
++++ b/ldap/schema/02common.ldif
+@@ -76,7 +76,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2349 NAME ( 'passwordDictCheck' 'pwdDict
+ attributeTypes: ( 2.16.840.1.113730.3.1.2350 NAME ( 'passwordDictPath' 'pwdDictPath' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
+ attributeTypes: ( 2.16.840.1.113730.3.1.2351 NAME ( 'passwordUserAttributes' 'pwdUserAttributes' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
+ attributeTypes: ( 2.16.840.1.113730.3.1.2352 NAME ( 'passwordBadWords' 'pwdBadWords' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
+-attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE USAGE directoryOperation X-ORIGIN '389 Directory Server' )
++attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN '389 Directory Server' )
+ attributeTypes: ( 2.16.840.1.113730.3.1.198 NAME 'memberURL' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
+ attributeTypes: ( 2.16.840.1.113730.3.1.199 NAME 'memberCertificateDescription' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
+ attributeTypes: ( 2.16.840.1.113730.3.1.207 NAME 'vlvBase' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
+-- 
+2.26.2
+
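
With NO-USER-MODIFICATION added to the pwdReset definition, a directly bound user should
no longer be able to write the attribute; only the server sets it. A hedged sketch of such
a check using python-ldap -- the URL, bind DN, password and entry DN are placeholders, and
the exact error code returned by the server is not asserted here:

    import ldap

    USER_DN = 'uid=jdoe,ou=People,dc=example,dc=com'   # placeholder test user

    conn = ldap.initialize('ldap://localhost:389')
    conn.simple_bind_s(USER_DN, 'password')
    try:
        # A self-service write of an operational, NO-USER-MODIFICATION attribute
        # is expected to be rejected.
        conn.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'pwdReset', [b'TRUE'])])
        print('unexpected: pwdReset was modified by the user')
    except ldap.LDAPError as e:
        print('rejected as expected:', e)
    finally:
        conn.unbind_s()
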
diff --git a/SOURCES/0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch b/SOURCES/0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch
new file mode 100644
index 0000000..5f4f6a3
--- /dev/null
+++ b/SOURCES/0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch
@@ -0,0 +1,202 @@
+From a6a52365df26edd4f6b0028056395d943344d787 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Thu, 11 Jun 2020 15:30:28 -0400
+Subject: [PATCH] Issue 50791 - Healthcheck should look for notes=A/F in access
+ log
+
+Description:  Add checks for notes=A (fully unindexed search) and
+              notes=F (Unknown attribute in search filter) in the
+              current access log.
+
+relates: https://pagure.io/389-ds-base/issue/50791
+
+Reviewed by: firstyear(Thanks!)
+---
+ src/lib389/lib389/cli_ctl/health.py |  4 +-
+ src/lib389/lib389/dirsrv_log.py     | 72 +++++++++++++++++++++++++++--
+ src/lib389/lib389/lint.py           | 26 ++++++++++-
+ 3 files changed, 96 insertions(+), 6 deletions(-)
+
+diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py
+index 6333a753a..89484a11b 100644
+--- a/src/lib389/lib389/cli_ctl/health.py
++++ b/src/lib389/lib389/cli_ctl/health.py
+@@ -1,5 +1,5 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2019 Red Hat, Inc.
++# Copyright (C) 2020 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -18,6 +18,7 @@ from lib389.monitor import MonitorDiskSpace
+ from lib389.replica import Replica, Changelog5
+ from lib389.nss_ssl import NssSsl
+ from lib389.dseldif import FSChecks, DSEldif
++from lib389.dirsrv_log import DirsrvAccessLog
+ from lib389 import lint
+ from lib389 import plugins
+ from lib389._constants import DSRC_HOME
+@@ -37,6 +38,7 @@ CHECK_OBJECTS = [
+     Changelog5,
+     DSEldif,
+     NssSsl,
++    DirsrvAccessLog,
+ ]
+ 
+ 
+diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py
+index baac2a3c9..7bed4bb17 100644
+--- a/src/lib389/lib389/dirsrv_log.py
++++ b/src/lib389/lib389/dirsrv_log.py
+@@ -1,5 +1,5 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2016 Red Hat, Inc.
++# Copyright (C) 2020 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -9,12 +9,17 @@
+ """Helpers for managing the directory server internal logs.
+ """
+ 
++import copy
+ import re
+ import gzip
+ from dateutil.parser import parse as dt_parse
+ from glob import glob
+ from lib389.utils import ensure_bytes
+-
++from lib389._mapped_object_lint import DSLint
++from lib389.lint import (
++    DSLOGNOTES0001,  # Unindexed search
++    DSLOGNOTES0002,  # Unknown attr in search filter
++)
+ 
+ # Because many of these settings can change live, we need to check for certain
+ # attributes all the time.
+@@ -35,7 +40,7 @@ MONTH_LOOKUP = {
+ }
+ 
+ 
+-class DirsrvLog(object):
++class DirsrvLog(DSLint):
+     """Class of functions to working with the various DIrectory Server logs
+     """
+     def __init__(self, dirsrv):
+@@ -189,6 +194,67 @@ class DirsrvAccessLog(DirsrvLog):
+         self.full_regexs = [self.prog_m1, self.prog_con, self.prog_discon]
+         self.result_regexs = [self.prog_notes, self.prog_repl,
+                               self.prog_result]
++    @classmethod
++    def lint_uid(cls):
++        return 'logs'
++
++    def _log_get_search_stats(self, conn, op):
++        lines = self.match(f".* conn={conn} op={op} SRCH base=.*")
++        if len(lines) != 1:
++            return None
++
++        quoted_vals = re.findall('"([^"]*)"', lines[0])
++        return {
++            'base': quoted_vals[0],
++            'filter': quoted_vals[1],
++            'timestamp': re.findall('\[(.*)\]', lines[0])[0],
++            'scope': lines[0].split(' scope=', 1)[1].split(' ',1)[0]
++        }
++
++    def _lint_notes(self):
++        """
++        Check for notes=A (fully unindexed searches), and
++        notes=F (unknown attribute in filter)
++        """
++        for pattern, lint_report in [(".* notes=A", DSLOGNOTES0001), (".* notes=F", DSLOGNOTES0002)]:
++            lines = self.match(pattern)
++            if len(lines) > 0:
++                count = 0
++                searches = []
++                for line in lines:
++                    if ' RESULT err=' in line:
++                        # Looks like a valid notes=A/F
++                        conn = line.split(' conn=', 1)[1].split(' ',1)[0]
++                        op = line.split(' op=', 1)[1].split(' ',1)[0]
++                        etime = line.split(' etime=', 1)[1].split(' ',1)[0]
++                        stats = self._log_get_search_stats(conn, op)
++                        if stats is not None:
++                            timestamp = stats['timestamp']
++                            base = stats['base']
++                            scope = stats['scope']
++                            srch_filter = stats['filter']
++                            count += 1
++                            if lint_report == DSLOGNOTES0001:
++                                searches.append(f'\n  [{count}] Unindexed Search\n'
++                                                f'      - date:    {timestamp}\n'
++                                                f'      - conn/op: {conn}/{op}\n'
++                                                f'      - base:    {base}\n'
++                                                f'      - scope:   {scope}\n'
++                                                f'      - filter:  {srch_filter}\n'
++                                                f'      - etime:   {etime}\n')
++                            else:
++                                searches.append(f'\n  [{count}] Invalid Attribute in Filter\n'
++                                                f'      - date:    {timestamp}\n'
++                                                f'      - conn/op: {conn}/{op}\n'
++                                                f'      - filter:  {srch_filter}\n')
++                if len(searches) > 0:
++                    report = copy.deepcopy(lint_report)
++                    report['items'].append(self._get_log_path())
++                    report['detail'] = report['detail'].replace('NUMBER', str(count))
++                    for srch in searches:
++                        report['detail'] += srch
++                    yield report
++
+ 
+     def _get_log_path(self):
+         """Return the current log file location"""
+diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
+index a103feec7..4b1700b92 100644
+--- a/src/lib389/lib389/lint.py
++++ b/src/lib389/lib389/lint.py
+@@ -1,5 +1,5 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2019 Red Hat, Inc.
++# Copyright (C) 2020 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -253,7 +253,7 @@ can use the CLI tool "dsconf" to resolve the conflict.  Here is an example:
+ 
+     Remove conflict entry and keep only the original/counterpart entry:
+ 
+-        # dsconf slapd-YOUR_INSTANCE  repl-conflict remove <DN of conflict entry>
++        # dsconf slapd-YOUR_INSTANCE  repl-conflict delete <DN of conflict entry>
+ 
+     Replace the original/counterpart entry with the conflict entry:
+ 
+@@ -418,3 +418,25 @@ until the time issues have been resolved:
+ Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems
+ and find the paragraph "Too much time skew"."""
+ }
++
++DSLOGNOTES0001 = {
++    'dsle': 'DSLOGNOTES0001',
++    'severity': 'Medium',
++    'description': 'Unindexed Search',
++    'items': ['Performance'],
++    'detail': """Found NUMBER fully unindexed searches in the current access log.
++Unindexed searches can cause high CPU and slow down the entire server's performance.\n""",
++    'fix': """Examine the searches that are unindexed, and either properly index the attributes
++in the filter, increase the nsslapd-idlistscanlimit, or stop using that filter."""
++}
++
++DSLOGNOTES0002 = {
++    'dsle': 'DSLOGNOTES0002',
++    'severity': 'Medium',
++    'description': 'Unknown Attribute In Filter',
++    'items': ['Possible Performance Impact'],
++    'detail': """Found NUMBER searches in the current access log that are using an
++unknown attribute in the search filter.\n""",
++    'fix': """Stop using this these unknown attributes in the filter, or add the schema
++to the server and make sure it's properly indexed."""
++}
+-- 
+2.26.2
+
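
The new lint pairs each notes=A / notes=F RESULT line with its SRCH line through the
conn/op identifiers, so the report can show the base and filter of the offending search.
A standalone sketch of the same pairing outside lib389, run against a saved access log;
the file name and the scan() helper are assumptions for illustration:

    import re

    SRCH = re.compile(r' conn=(\d+) op=(\d+) SRCH base="([^"]*)".* filter="([^"]*)"')
    RESULT = re.compile(r' conn=(\d+) op=(\d+) RESULT .*notes=([AF])')

    def scan(path='access'):
        searches, hits = {}, []
        with open(path) as log:
            for line in log:
                m = SRCH.search(line)
                if m:
                    searches[(m.group(1), m.group(2))] = (m.group(3), m.group(4))
                m = RESULT.search(line)
                if m:
                    base, srch_filter = searches.get((m.group(1), m.group(2)), ('?', '?'))
                    kind = ('unindexed search' if m.group(3) == 'A'
                            else 'unknown attribute in filter')
                    hits.append((kind, base, srch_filter))
        return hits

    if __name__ == '__main__':
        for kind, base, srch_filter in scan():
            print('%s: base=%s filter=%s' % (kind, base, srch_filter))
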
diff --git a/SOURCES/0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch b/SOURCES/0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch
new file mode 100644
index 0000000..d2663da
--- /dev/null
+++ b/SOURCES/0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch
@@ -0,0 +1,51 @@
+From 2844d4ad90cbbd23ae75309e50ae4d7145586bb7 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Wed, 10 Jun 2020 14:07:24 -0400
+Subject: [PATCH] Issue 51144 - dsctl fails with instance names that contain
+ slapd-
+
+Bug Description:  If an instance name contains 'slapd-' the CLI breaks:
+
+                      slapd-test-slapd
+
+Fix Description:  Only strip off "slapd-" from the front of the instance
+                  name.
+
+relates: https://pagure.io/389-ds-base/issue/51144
+
+Reviewed by: firstyear(Thanks!)
+---
+ src/lib389/lib389/__init__.py | 2 +-
+ src/lib389/lib389/dseldif.py  | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
+index 0ff1ab173..63d44b60a 100644
+--- a/src/lib389/lib389/__init__.py
++++ b/src/lib389/lib389/__init__.py
+@@ -710,7 +710,7 @@ class DirSrv(SimpleLDAPObject, object):
+         # Don't need a default value now since it's set in init.
+         if serverid is None and hasattr(self, 'serverid'):
+             serverid = self.serverid
+-        elif serverid is not None:
++        elif serverid is not None and serverid.startswith('slapd-'):
+             serverid = serverid.replace('slapd-', '', 1)
+ 
+         if self.serverid is None:
+diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
+index 96c9af9d1..f2725add9 100644
+--- a/src/lib389/lib389/dseldif.py
++++ b/src/lib389/lib389/dseldif.py
+@@ -40,7 +40,8 @@ class DSEldif(DSLint):
+         if serverid:
+             # Get the dse.ldif from the instance name
+             prefix = os.environ.get('PREFIX', ""),
+-            serverid = serverid.replace("slapd-", "")
++            if serverid.startswith("slapd-"):
++                serverid = serverid.replace("slapd-", "", 1)
+             self.path = "{}/etc/dirsrv/slapd-{}/dse.ldif".format(prefix[0], serverid)
+         else:
+             ds_paths = Paths(self._instance.serverid, self._instance)
+-- 
+2.26.2
+
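
The behavioural difference between the old and the patched stripping is easy to see in
isolation; the instance id below is a made-up name that happens to contain 'slapd-' in
the middle:

    serverid = 'slapd-test-slapd-demo'

    # Old dseldif.py behaviour: every occurrence is stripped, mangling the name.
    print(serverid.replace('slapd-', ''))        # -> test-demo

    # Patched behaviour: strip the prefix only, and only when it is really there.
    if serverid.startswith('slapd-'):
        serverid = serverid.replace('slapd-', '', 1)
    print(serverid)                              # -> test-slapd-demo
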
diff --git a/SOURCES/0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch b/SOURCES/0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch
new file mode 100644
index 0000000..8d25933
--- /dev/null
+++ b/SOURCES/0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch
@@ -0,0 +1,520 @@
+From 6cd4b1c60dbd3d7b74adb19a2434585d50553f39 Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Fri, 5 Jun 2020 12:14:51 +0200
+Subject: [PATCH] Ticket 49859 - A distinguished value can be missing in an
+ entry
+
+Bug description:
+	According to RFC 4511 (see ticket), the values of the RDN attributes
+        should be present in an entry.
+	With a set of replicated operations, it is possible that those values
+        would be missing
+
+Fix description:
+        MOD and MODRDN update checks that the RDN values are presents.
+        If they are missing they are added to the resulting entry. In addition
+        the set of modifications to add those values are also indexed.
+        The specific case of single-valued attributes, where the final and unique value
+        can not be the RDN value, the attribute nsds5ReplConflict is added.
+
+https://pagure.io/389-ds-base/issue/49859
+
+Reviewed by: Mark Reynolds, William Brown
+
+Platforms tested: F31
+---
+ .../replication/conflict_resolve_test.py      | 174 +++++++++++++++++-
+ ldap/servers/slapd/back-ldbm/ldbm_modify.c    | 136 ++++++++++++++
+ ldap/servers/slapd/back-ldbm/ldbm_modrdn.c    |  37 +++-
+ .../servers/slapd/back-ldbm/proto-back-ldbm.h |   1 +
+ 4 files changed, 343 insertions(+), 5 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
+index 99a072935..48d0067db 100644
+--- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
++++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
+@@ -10,10 +10,11 @@ import time
+ import logging
+ import ldap
+ import pytest
++import re
+ from itertools import permutations
+ from lib389._constants import *
+ from lib389.idm.nscontainer import nsContainers
+-from lib389.idm.user import UserAccounts
++from lib389.idm.user import UserAccounts, UserAccount
+ from lib389.idm.group import Groups
+ from lib389.idm.organizationalunit import OrganizationalUnits
+ from lib389.replica import ReplicationManager
+@@ -763,6 +764,177 @@ class TestTwoMasters:
+         user_dns_m2 = [user.dn for user in test_users_m2.list()]
+         assert set(user_dns_m1) == set(user_dns_m2)
+ 
++    def test_conflict_attribute_multi_valued(self, topology_m2, base_m2):
++        """A RDN attribute being multi-valued, checks that after several operations
++           MODRDN and MOD_REPL its RDN values are the same on both servers
++
++        :id: 225b3522-8ed7-4256-96f9-5fab9b7044a5
++        :setup: Two master replication,
++                audit log, error log for replica and access log for internal
++        :steps:
++            1. Create a test entry uid=user_test_1000,...
++            2. Pause all replication agreements
++            3. On M1 rename it into uid=foo1,...
++            4. On M2 rename it into uid=foo2,...
++            5. On M1 MOD_REPL uid:foo1
++            6. Resume all replication agreements
++            7. Check that entry on M1 has uid=foo1, foo2
++            8. Check that entry on M2 has uid=foo1, foo2
++            9. Check that entry on M1 and M2 has the same uid values
++        :expectedresults:
++            1. It should pass
++            2. It should pass
++            3. It should pass
++            4. It should pass
++            5. It should pass
++            6. It should pass
++            7. It should pass
++            8. It should pass
++            9. It should pass
++        """
++
++        M1 = topology_m2.ms["master1"]
++        M2 = topology_m2.ms["master2"]
++
++        # add a test user
++        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
++        user_1 = test_users_m1.create_test_user(uid=1000)
++        test_users_m2 = UserAccount(M2, user_1.dn)
++        # Waiting for the user to be replicated
++        for i in range(0,4):
++            time.sleep(1)
++            if test_users_m2.exists():
++                break
++        assert(test_users_m2.exists())
++
++        # Stop replication agreements
++        topology_m2.pause_all_replicas()
++
++        # On M1 rename test entry in uid=foo1
++        original_dn = user_1.dn
++        user_1.rename('uid=foo1')
++        time.sleep(1)
++
++        # On M2 rename test entry in uid=foo2
++        M2.rename_s(original_dn, 'uid=foo2')
++        time.sleep(2)
++
++        # on M1 MOD_REPL uid into foo1
++        user_1.replace('uid', 'foo1')
++
++        # resume replication agreements
++        topology_m2.resume_all_replicas()
++        time.sleep(5)
++
++        # check that on M1, the entry 'uid' has two values 'foo1' and 'foo2'
++        final_dn = re.sub('^.*1000,', 'uid=foo2,', original_dn)
++        final_user_m1 = UserAccount(M1, final_dn)
++        for val in final_user_m1.get_attr_vals_utf8('uid'):
++            log.info("Check %s is on M1" % val)
++            assert(val in ['foo1', 'foo2'])
++
++        # check that on M2, the entry 'uid' has two values 'foo1' and 'foo2'
++        final_user_m2 = UserAccount(M2, final_dn)
++        for val in final_user_m2.get_attr_vals_utf8('uid'):
++            log.info("Check %s is on M1" % val)
++            assert(val in ['foo1', 'foo2'])
++
++        # check that the entries have the same uid values
++        for val in final_user_m1.get_attr_vals_utf8('uid'):
++            log.info("Check M1.uid %s is also on M2" % val)
++            assert(val in final_user_m2.get_attr_vals_utf8('uid'))
++
++        for val in final_user_m2.get_attr_vals_utf8('uid'):
++            log.info("Check M2.uid %s is also on M1" % val)
++            assert(val in final_user_m1.get_attr_vals_utf8('uid'))
++
++    def test_conflict_attribute_single_valued(self, topology_m2, base_m2):
++        """A RDN attribute being signle-valued, checks that after several operations
++           MODRDN and MOD_REPL its RDN values are the same on both servers
++
++        :id: c38ae613-5d1e-47cf-b051-c7284e64b817
++        :setup: Two master replication, test container for entries, enable plugin logging,
++                audit log, error log for replica and access log for internal
++        :steps:
++            1. Create a test entry uid=user_test_1000,...
++            2. Pause all replication agreements
++            3. On M1 rename it into employeenumber=foo1,...
++            4. On M2 rename it into employeenumber=foo2,...
++            5. On M1 MOD_REPL employeenumber:foo1
++            6. Resume all replication agreements
++            7. Check that entry on M1 has employeenumber=foo1
++            8. Check that entry on M2 has employeenumber=foo1
++            9. Check that entry on M1 and M2 has the same employeenumber values
++        :expectedresults:
++            1. It should pass
++            2. It should pass
++            3. It should pass
++            4. It should pass
++            5. It should pass
++            6. It should pass
++            7. It should pass
++            8. It should pass
++            9. It should pass
++        """
++
++        M1 = topology_m2.ms["master1"]
++        M2 = topology_m2.ms["master2"]
++
++        # add a test user with a dummy 'uid' extra value because modrdn removes
++        # uid that conflict with 'account' objectclass
++        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
++        user_1 = test_users_m1.create_test_user(uid=1000)
++        user_1.add('objectclass', 'extensibleobject')
++        user_1.add('uid', 'dummy')
++        test_users_m2 = UserAccount(M2, user_1.dn)
++
++        # Waiting for the user to be replicated
++        for i in range(0,4):
++            time.sleep(1)
++            if test_users_m2.exists():
++                break
++        assert(test_users_m2.exists())
++
++        # Stop replication agreements
++        topology_m2.pause_all_replicas()
++
++        # On M1 rename test entry in employeenumber=foo1
++        original_dn = user_1.dn
++        user_1.rename('employeenumber=foo1')
++        time.sleep(1)
++
++        # On M2 rename test entry in employeenumber=foo2
++        M2.rename_s(original_dn, 'employeenumber=foo2')
++        time.sleep(2)
++
++        # on M1 MOD_REPL uid into foo1
++        user_1.replace('employeenumber', 'foo1')
++
++        # resume replication agreements
++        topology_m2.resume_all_replicas()
++        time.sleep(5)
++
++        # check that on M1, the entry 'employeenumber' has value 'foo1'
++        final_dn = re.sub('^.*1000,', 'employeenumber=foo2,', original_dn)
++        final_user_m1 = UserAccount(M1, final_dn)
++        for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
++            log.info("Check %s is on M1" % val)
++            assert(val in ['foo1'])
++
++        # check that on M2, the entry 'employeenumber' has values 'foo1'
++        final_user_m2 = UserAccount(M2, final_dn)
++        for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
++            log.info("Check %s is on M2" % val)
++            assert(val in ['foo1'])
++
++        # check that the entries have the same employeenumber values
++        for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
++            log.info("Check M1.employeenumber %s is also on M2" % val)
++            assert(val in final_user_m2.get_attr_vals_utf8('employeenumber'))
++
++        for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
++            log.info("Check M2.employeenumber %s is also on M1" % val)
++            assert(val in final_user_m1.get_attr_vals_utf8('employeenumber'))
+ 
+ class TestThreeMasters:
+     def test_nested_entries(self, topology_m3, base_m3):
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+index e9d7e87e3..a507f3c31 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+@@ -213,6 +213,112 @@ error:
+     return retval;
+ }
+ 
++int32_t
++entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret)
++{
++    unsigned long op_type = SLAPI_OPERATION_NONE;
++    char *new_rdn = NULL;
++    char **dns = NULL;
++    char **rdns = NULL;
++    Slapi_Mods *smods = NULL;
++    char *type = NULL;
++    struct berval *bvp[2] = {0};
++    struct berval bv;
++    Slapi_Attr *attr = NULL;
++    const char *entry_dn = NULL;
++
++    *smods_ret = NULL;
++    entry_dn = slapi_entry_get_dn_const(entry);
++    /* Do not bother to check that the RDN is present, no one renames the RUV or changes its nsuniqueid */
++    if (strcasestr(entry_dn, RUV_STORAGE_ENTRY_UNIQUEID)) {
++        return 0;
++    }
++
++    /* First get the RDNs of the operation */
++    slapi_pblock_get(pb, SLAPI_OPERATION_TYPE, &op_type);
++    switch (op_type) {
++        case SLAPI_OPERATION_MODIFY:
++            dns = slapi_ldap_explode_dn(entry_dn, 0);
++            if (dns == NULL) {
++                slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
++                      "Fails to split DN \"%s\" into components\n", entry_dn);
++                return -1;
++            }
++            rdns = slapi_ldap_explode_rdn(dns[0], 0);
++            slapi_ldap_value_free(dns);
++
++            break;
++        case SLAPI_OPERATION_MODRDN:
++            slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &new_rdn);
++            rdns = slapi_ldap_explode_rdn(new_rdn, 0);
++            break;
++        default:
++            break;
++    }
++    if (rdns == NULL || rdns[0] == NULL) {
++        slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
++                      "Fails to split RDN \"%s\" into components\n", slapi_entry_get_dn_const(entry));
++        return -1;
++    }
++
++    /* Update the entry to add RDNs values if they are missing */
++    smods = slapi_mods_new();
++
++    bvp[0] = &bv;
++    bvp[1] = NULL;
++    for (size_t rdns_count = 0; rdns[rdns_count]; rdns_count++) {
++        Slapi_Value *value;
++        attr = NULL;
++        slapi_rdn2typeval(rdns[rdns_count], &type, &bv);
++
++        /* Check if the RDN value exists */
++        if ((slapi_entry_attr_find(entry, type, &attr) != 0) ||
++            (slapi_attr_value_find(attr, &bv))) {
++            const CSN *csn_rdn_add;
++            const CSN *adcsn = attr_get_deletion_csn(attr);
++
++            /* It is missing => adds it */
++            if (slapi_attr_flag_is_set(attr, SLAPI_ATTR_FLAG_SINGLE)) {
++                if (csn_compare(adcsn, csn) >= 0) {
++                    /* this is a single valued attribute and the current value
++                     * (that is different from RDN value) is more recent than
++                     * the RDN value we want to apply.
++                     * Keep the current value and add a conflict flag
++                     */
++
++                    type = ATTR_NSDS5_REPLCONFLICT;
++                    bv.bv_val = "RDN value may be missing because it is single-valued";
++                    bv.bv_len = strlen(bv.bv_val);
++                    slapi_entry_add_string(entry, type, bv.bv_val);
++                    slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
++                    continue;
++                }
++            }
++            /* if an RDN value needs to be forced, make sure its csn is ahead */
++            slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
++            csn_rdn_add = csn_max(adcsn, csn);
++
++            if (entry_apply_mods_wsi(entry, smods, csn_rdn_add, repl_op)) {
++                slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
++                              "Fails to set \"%s\" in  \"%s\"\n", type, slapi_entry_get_dn_const(entry));
++                slapi_ldap_value_free(rdns);
++                slapi_mods_free(&smods);
++                return -1;
++            }
++            /* Make the RDN value a distinguished value */
++            attr_value_find_wsi(attr, &bv, &value);
++            value_update_csn(value, CSN_TYPE_VALUE_DISTINGUISHED, csn_rdn_add);
++        }
++    }
++    slapi_ldap_value_free(rdns);
++    if (smods->num_mods == 0) {
++        /* smods_ret already NULL, just free the useless smods */
++        slapi_mods_free(&smods);
++    } else {
++        *smods_ret = smods;
++    }
++    return 0;
++}
+ /**
+    Apply the mods to the ec entry.  Check for syntax, schema problems.
+    Check for abandon.
+@@ -269,6 +375,8 @@ modify_apply_check_expand(
+         goto done;
+     }
+ 
++
++
+     /*
+      * If the objectClass attribute type was modified in any way, expand
+      * the objectClass values to reflect the inheritance hierarchy.
+@@ -414,6 +522,7 @@ ldbm_back_modify(Slapi_PBlock *pb)
+     int result_sent = 0;
+     int32_t parent_op = 0;
+     struct timespec parent_time;
++    Slapi_Mods *smods_add_rdn = NULL;
+ 
+     slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+@@ -731,6 +840,15 @@ ldbm_back_modify(Slapi_PBlock *pb)
+             }
+         } /* else if new_mod_count == mod_count then betxnpremod plugin did nothing */
+ 
++        /* time to check if applying a replicated operation removed
++         * the RDN value from the entry. Assuming that only replicated update
++         * can lead to that bad result
++         */
++        if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, repl_op, &smods_add_rdn)) {
++            goto error_return;
++        }
++
++
+         /*
+          * Update the ID to Entry index.
+          * Note that id2entry_add replaces the entry, so the Entry ID
+@@ -764,6 +882,23 @@ ldbm_back_modify(Slapi_PBlock *pb)
+             MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
+             goto error_return;
+         }
++
++        if (smods_add_rdn && slapi_mods_get_num_mods(smods_add_rdn) > 0) {
++            retval = index_add_mods(be, (LDAPMod **) slapi_mods_get_ldapmods_byref(smods_add_rdn), e, ec, &txn);
++            if (DB_LOCK_DEADLOCK == retval) {
++                /* Abort and re-try */
++                slapi_mods_free(&smods_add_rdn);
++                continue;
++            }
++            if (retval != 0) {
++                slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
++                        "index_add_mods (rdn) failed, err=%d %s\n",
++                        retval, (msg = dblayer_strerror(retval)) ? msg : "");
++                MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
++                slapi_mods_free(&smods_add_rdn);
++                goto error_return;
++            }
++        }
+         /*
+          * Remove the old entry from the Virtual List View indexes.
+          * Add the new entry to the Virtual List View indexes.
+@@ -978,6 +1113,7 @@ error_return:
+ 
+ common_return:
+     slapi_mods_done(&smods);
++    slapi_mods_free(&smods_add_rdn);
+ 
+     if (inst) {
+         if (ec_locked || cache_is_in_cache(&inst->inst_cache, ec)) {
+diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+index fde83c99f..e97b7a5f6 100644
+--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
++++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+@@ -21,7 +21,7 @@ static void moddn_unlock_and_return_entry(backend *be, struct backentry **target
+ static int moddn_newrdn_mods(Slapi_PBlock *pb, const char *olddn, struct backentry *ec, Slapi_Mods *smods_wsi, int is_repl_op);
+ static IDList *moddn_get_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, struct backentry *parententry, Slapi_DN *parentdn, struct backentry ***child_entries, struct backdn ***child_dns, int is_resurect_operation);
+ static int moddn_rename_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, IDList *children, Slapi_DN *dn_parentdn, Slapi_DN *dn_newsuperiordn, struct backentry *child_entries[]);
+-static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3);
++static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4);
+ static void mods_remove_nsuniqueid(Slapi_Mods *smods);
+ 
+ #define MOD_SET_ERROR(rc, error, count)                                            \
+@@ -100,6 +100,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
+     Connection *pb_conn = NULL;
+     int32_t parent_op = 0;
+     struct timespec parent_time;
++    Slapi_Mods *smods_add_rdn = NULL;
+ 
+     if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) {
+         conn_id = 0; /* connection is NULL */
+@@ -842,6 +843,15 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
+                     goto error_return;
+                 }
+             }
++
++            /* time to check if applying a replicated operation removed
++             * the RDN value from the entry. Assuming that only replicated update
++             * can lead to that bad result
++             */
++            if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, is_replicated_operation, &smods_add_rdn)) {
++                goto error_return;
++            }
++
+             /* check that the entry still obeys the schema */
+             if (slapi_entry_schema_check(pb, ec->ep_entry) != 0) {
+                 ldap_result_code = LDAP_OBJECT_CLASS_VIOLATION;
+@@ -1003,7 +1013,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
+         /*
+          * Update the indexes for the entry.
+          */
+-        retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi);
++        retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi, smods_add_rdn);
+         if (DB_LOCK_DEADLOCK == retval) {
+             /* Retry txn */
+             continue;
+@@ -1497,6 +1507,7 @@ common_return:
+     slapi_mods_done(&smods_operation_wsi);
+     slapi_mods_done(&smods_generated);
+     slapi_mods_done(&smods_generated_wsi);
++    slapi_mods_free(&smods_add_rdn);
+     slapi_ch_free((void **)&child_entries);
+     slapi_ch_free((void **)&child_dns);
+     if (ldap_result_matcheddn && 0 != strcmp(ldap_result_matcheddn, "NULL"))
+@@ -1778,7 +1789,7 @@ mods_remove_nsuniqueid(Slapi_Mods *smods)
+  * mods contains the list of attribute change made.
+  */
+ static int
+-modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3)
++modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4)
+ {
+     backend *be;
+     ldbm_instance *inst;
+@@ -1874,6 +1885,24 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm
+             goto error_return;
+         }
+     }
++    if (smods4 != NULL && slapi_mods_get_num_mods(smods4) > 0) {
++        /*
++         * update the indexes: lastmod, rdn, etc.
++         */
++        retval = index_add_mods(be, slapi_mods_get_ldapmods_byref(smods4), e, *ec, ptxn);
++        if (DB_LOCK_DEADLOCK == retval) {
++            /* Retry txn */
++            slapi_log_err(SLAPI_LOG_BACKLDBM, "modrdn_rename_entry_update_indexes",
++                          "index_add_mods4 deadlock\n");
++            goto error_return;
++        }
++        if (retval != 0) {
++            slapi_log_err(SLAPI_LOG_TRACE, "modrdn_rename_entry_update_indexes",
++                          "index_add_mods 4 failed, err=%d %s\n",
++                          retval, (msg = dblayer_strerror(retval)) ? msg : "");
++            goto error_return;
++        }
++    }
+     /*
+      * Remove the old entry from the Virtual List View indexes.
+      * Add the new entry to the Virtual List View indexes.
+@@ -1991,7 +2020,7 @@ moddn_rename_child_entry(
+          * Update all the indexes.
+          */
+         retval = modrdn_rename_entry_update_indexes(ptxn, pb, li, e, ec,
+-                                                    smodsp, NULL, NULL);
++                                                    smodsp, NULL, NULL, NULL);
+         /* JCMREPL - Should the children get updated modifiersname and lastmodifiedtime? */
+         slapi_mods_done(&smods);
+     }
+diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+index 4d2524fd9..e2f1100ed 100644
+--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
++++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+@@ -324,6 +324,7 @@ int get_parent_rdn(DB *db, ID parentid, Slapi_RDN *srdn);
+ /*
+  * modify.c
+  */
++int32_t entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret);
+ int modify_update_all(backend *be, Slapi_PBlock *pb, modify_context *mc, back_txn *txn);
+ void modify_init(modify_context *mc, struct backentry *old_entry);
+ int modify_apply_mods(modify_context *mc, Slapi_Mods *smods);
+-- 
+2.26.2
+
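
The core of the new entry_get_rdn_mods() is: split the operation's (new) RDN into
type=value pairs and, for each pair no longer present in the entry, build an ADD mod so
the distinguished value reappears, except that a single-valued attribute already holding
a newer value keeps it and receives nsds5ReplConflict instead. A toy sketch of that
decision with a dictionary standing in for the entry; it ignores CSNs, schema and
indexing and exists only to show the flow (rdn_mods() is an invented name):

    def rdn_mods(rdn, entry, single_valued=()):
        """Return ADD-style mods needed to restore missing RDN values."""
        mods = []
        for ava in rdn.split('+'):                  # multi-valued RDNs use '+'
            attr, value = ava.split('=', 1)
            current = entry.get(attr, [])
            if value in current:
                continue                            # distinguished value already there
            if attr in single_valued and current:
                # Keep the existing (newer) value, record the conflict instead.
                mods.append(('add', 'nsds5ReplConflict',
                             ['RDN value may be missing because it is single-valued']))
            else:
                mods.append(('add', attr, [value])) # put the RDN value back
        return mods

    entry = {'uid': ['foo2'], 'employeenumber': ['foo1']}
    print(rdn_mods('uid=foo1', entry))                          # restores uid=foo1
    print(rdn_mods('employeenumber=foo2', entry,
                   single_valued=('employeenumber',)))          # conflict flag instead
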
diff --git a/SOURCES/0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch b/SOURCES/0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch
new file mode 100644
index 0000000..2e20c8c
--- /dev/null
+++ b/SOURCES/0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch
@@ -0,0 +1,128 @@
+From 2be9d1b4332d3b9b55a2d285e9610813100e235f Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Mon, 22 Jun 2020 17:49:10 -0400
+Subject: [PATCH] Issue 49256 - log warning when thread number is very
+ different from autotuned value
+
+Description:  To help prevent customers from setting incorrect values for
+              the thread number it would be useful to warn them that the
+              configured value is either way too low or way too high.
+
+relates: https://pagure.io/389-ds-base/issue/49256
+
+Reviewed by: firstyear(Thanks!)
+---
+ .../tests/suites/config/autotuning_test.py    | 28 +++++++++++++++
+ ldap/servers/slapd/libglobs.c                 | 34 ++++++++++++++++++-
+ ldap/servers/slapd/slap.h                     |  3 ++
+ 3 files changed, 64 insertions(+), 1 deletion(-)
+
+diff --git a/dirsrvtests/tests/suites/config/autotuning_test.py b/dirsrvtests/tests/suites/config/autotuning_test.py
+index d1c751444..540761250 100644
+--- a/dirsrvtests/tests/suites/config/autotuning_test.py
++++ b/dirsrvtests/tests/suites/config/autotuning_test.py
+@@ -43,6 +43,34 @@ def test_threads_basic(topo):
+     assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0
+ 
+ 
++def test_threads_warning(topo):
++    """Check that we log a warning if the thread number is too high or low
++
++    :id: db92412b-2812-49de-84b0-00f452cd254f
++    :setup: Standalone Instance
++    :steps:
++        1. Get autotuned thread number
++        2. Set threads way higher than hw threads, and find a warning in the log
++        3. Set threads way lower than hw threads, and find a warning in the log
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++    """
++    topo.standalone.config.set("nsslapd-threadnumber", "-1")
++    autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber")
++
++    topo.standalone.config.set("nsslapd-threadnumber", str(int(autotuned_value) * 4))
++    time.sleep(.5)
++    assert topo.standalone.ds_error_log.match('.*higher.*hurt server performance.*')
++
++    if int(autotuned_value) > 1:
++        # If autotuned is 1, there isn't anything to test here
++        topo.standalone.config.set("nsslapd-threadnumber", "1")
++        time.sleep(.5)
++        assert topo.standalone.ds_error_log.match('.*lower.*hurt server performance.*')
++
++
+ @pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid'))
+ def test_threads_invalid_value(topo, invalid_value):
+     """Check nsslapd-threadnumber for an invalid values
+diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
+index fbf90d92d..88676a303 100644
+--- a/ldap/servers/slapd/libglobs.c
++++ b/ldap/servers/slapd/libglobs.c
+@@ -4374,6 +4374,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
+ {
+     int retVal = LDAP_SUCCESS;
+     int32_t threadnum = 0;
++    int32_t hw_threadnum = 0;
+     char *endp = NULL;
+ 
+     slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+@@ -4386,8 +4387,39 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
+     threadnum = strtol(value, &endp, 10);
+ 
+     /* Means we want to re-run the hardware detection. */
++    hw_threadnum = util_get_hardware_threads();
+     if (threadnum == -1) {
+-        threadnum = util_get_hardware_threads();
++        threadnum = hw_threadnum;
++    } else {
++        /*
++         * Log a message if the user defined thread number is very different
++         * from the hardware threads as this is probably not the optimal
++         * value.
++         */
++        if (threadnum >= hw_threadnum) {
++            if (threadnum > MIN_THREADS && threadnum / hw_threadnum >= 4) {
++                /* We're over the default minimum and way higher than the hw
++                 * threads. */
++                slapi_log_err(SLAPI_LOG_NOTICE, "config_set_threadnumber",
++                        "The configured thread number (%d) is significantly "
++                        "higher than the number of hardware threads (%d).  "
++                        "This can potentially hurt server performance.  If "
++                        "you are unsure how to tune \"nsslapd-threadnumber\" "
++                        "then set it to \"-1\" and the server will tune it "
++                        "according to the system hardware\n",
++                        threadnum, hw_threadnum);
++            }
++        } else if (threadnum < MIN_THREADS) {
++            /* The thread number should never be less than the minimum and
++             * hardware threads. */
++            slapi_log_err(SLAPI_LOG_WARNING, "config_set_threadnumber",
++                    "The configured thread number (%d) is lower than the number "
++                    "of hardware threads (%d).  This will hurt server performance.  "
++                    "If you are unsure how to tune \"nsslapd-threadnumber\" then "
++                    "set it to \"-1\" and the server will tune it according to the "
++                    "system hardware\n",
++                    threadnum, hw_threadnum);
++        }
+     }
+ 
+     if (*endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535) {
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index 8e76393c3..894efd29c 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -403,6 +403,9 @@ typedef void (*VFPV)(); /* takes undefined arguments */
+ #define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE 0
+ #define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE_STR "0"
+ 
++#define MIN_THREADS 16
++#define MAX_THREADS 512
++
+ 
+ /* Default password values. */
+ 
+-- 
+2.26.2
+
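
The warning policy added to config_set_threadnumber() reduces to two checks against the
autotuned hardware value: notice when the setting is at least four times the hardware
thread count (and above the minimum), warn when it is below both the hardware thread
count and that minimum. A small sketch of the same decision, assuming MIN_THREADS is 16
as in slap.h above; check_threadnumber() and the message wording are illustrative:

    MIN_THREADS = 16   # mirrors the constant added to slap.h

    def check_threadnumber(configured, hw_threads):
        """Return an advisory string when the configured value looks far off."""
        if configured >= hw_threads:
            if configured > MIN_THREADS and configured // hw_threads >= 4:
                return ('notice: %d threads is much higher than the %d hardware '
                        'threads and may hurt performance' % (configured, hw_threads))
        elif configured < MIN_THREADS:
            return ('warning: %d threads is lower than the %d hardware threads '
                    'and will hurt performance' % (configured, hw_threads))
        return None

    print(check_threadnumber(96, 8))    # far above the hardware thread count
    print(check_threadnumber(4, 24))    # far below it
    print(check_threadnumber(24, 24))   # None -- nothing to report
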
diff --git a/SOURCES/0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch b/SOURCES/0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch
new file mode 100644
index 0000000..94c3f34
--- /dev/null
+++ b/SOURCES/0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch
@@ -0,0 +1,34 @@
+From d24381488a997dda0006b603fb2b452b726757c0 Mon Sep 17 00:00:00 2001
+From: Simon Pichugin <simon.pichugin@gmail.com>
+Date: Thu, 25 Jun 2020 10:45:16 +0200
+Subject: [PATCH] Issue 51188 - db2ldif crashes when LDIF file can't be
+ accessed
+
+Bug Description: db2ldif crashes when we set '-a LDIF_PATH' to a place that
+can't be accessed by the user (dirsrv by default)
+
+Fix Description: Don't attempt to close DB if we bail after a failed
+attempt to open LDIF file.
+
+https://pagure.io/389-ds-base/issue/51188
+
+Reviewed by: mreynolds (Thanks!)
+---
+ ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+index 542147c3d..9ffd877cb 100644
+--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+@@ -871,6 +871,7 @@ bdb_db2ldif(Slapi_PBlock *pb)
+             slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
+                     "db2ldif: %s: can't open %s: %d (%s) while running as user \"%s\"\n",
+                     inst->inst_name, fname, errno, dblayer_strerror(errno), slapdFrontendConfig->localuserinfo->pw_name);
++            we_start_the_backends = 0;
+             return_value = -1;
+             goto bye;
+         }
+-- 
+2.26.2
+
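
The one-line fix above clears we_start_the_backends before jumping to the shared 'bye'
exit, so an export that bails out on an inaccessible LDIF file does not try to tear down
backends it never brought up. A generic sketch of that guard-flag pattern in Python, not
the bdb code itself; the function and callback names are invented for illustration:

    def export(open_output, start_backends, stop_backends, dump):
        started = False
        try:
            out = open_output()          # may fail, e.g. an unwritable LDIF path
            start_backends()
            started = True
            dump(out)
        finally:
            # Only undo what this function actually did.
            if started:
                stop_backends()

    def failing_open():
        raise OSError('permission denied')

    try:
        export(failing_open, lambda: print('start'), lambda: print('stop'), print)
    except OSError as e:
        print('export aborted cleanly:', e)
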
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index ea0e209..7901e19 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -45,7 +45,7 @@ ExcludeArch: i686
 Summary:          389 Directory Server (base)
 Name:             389-ds-base
 Version:          1.4.3.8
-Release:          %{?relprefix}2%{?prerel}%{?dist}
+Release:          %{?relprefix}4%{?prerel}%{?dist}
 License:          GPLv3+
 URL:              https://www.port389.org
 Group:            System Environment/Daemons
@@ -174,6 +174,28 @@ Source2:          %{name}-devel.README
 %if %{bundle_jemalloc}
 Source3:          https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
 %endif
+Patch01:          0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch
+Patch02:          0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch
+Patch03:          0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch
+Patch04:          0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch
+Patch05:          0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch
+Patch06:          0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch
+Patch07:          0007-Issue-51110-Fix-ASAN-ODR-warnings.patch
+Patch08:          0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch
+Patch09:          0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch
+Patch10:          0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch
+Patch11:          0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch
+Patch12:          0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch
+Patch13:          0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch
+Patch14:          0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch
+Patch15:          0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch
+Patch16:          0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch
+Patch17:          0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch
+Patch18:          0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch
+Patch19:          0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch
+Patch20:          0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch
+Patch21:          0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch
+Patch22:          0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch
 
 %description
 389 Directory Server is an LDAPv3 compliant server.  The base package includes
@@ -413,7 +435,7 @@ popd
 
 mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname}
 mkdir -p $RPM_BUILD_ROOT/var/lib/%{pkgname}
-mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname}
+mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname}
 
 # for systemd
 mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/systemd/system/%{groupname}.wants
@@ -791,6 +813,33 @@ exit 0
 %doc README.md
 
 %changelog
+* Fri Jun 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-4
+- Bump version to 1.4.3.8-4
+- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif
+- Resolves: Bug 1450863 - Log warning when tuning of nsslapd-threadnumber above or below the optimal value
+- Resolves: Bug 1647017 - A distinguished value of a single valued attribute can be missing in an entry
+- Resolves: Bug 1806573 - Dsctl healthcheck doesn't work when using instance name with 'slapd-'
+- Resolves: Bug 1807773 - dsctl healthcheck : typo in DSREPLLE0002 Lint error suggested resolution commands
+- Resolves: Bug 1843567 - Healthcheck to find notes=F
+- Resolves: Bug 1845094 - User/Directory Manager can modify Password Policy attribute "pwdReset"
+- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
+- Resolves: Bug 1442386 - Recreating an index while changing case will create an indexfile with the old name (different case) and after restart the indexfile is abandoned
+- Resolves: Bug 1672574 - nsIndexIDListScanLimit accepts any value
+- Resolves: Bug 1800529 - Memory leaks in disk monitoring
+
+* Fri Jun 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-3
+- Bump version to 1.4.3.8-3
+- Resolves: Bug 1835619 - Healthcheck with --json option reports "Object of type 'bytes' is not JSON serializable" when mapping tree is deleted 
+- Resolves: Bug 1836428 - Directory Server ds-replcheck RFE to add a timeout command-line arg/value to wait longer when connecting to a replica server
+- Resolves: Bug 1843090 - abort when a empty valueset is freed
+- Resolves: Bug 1843156 - Prevent unnecessarily duplication of the target entry
+- Resolves: Bug 1843157 - Check for clock errors and time skew
+- Resolves: Bug 1843159 - RFE AD filter rewriter for ObjectCategory
+- Resolves: Bug 1843162 - Creating Replication Manager fails if uid=repman is used
+- Resolves: Bug 1816851 - Add option to healthcheck to list all the lint reports
+- Resolves: Bug 1748227 - Instance name length is not enforced
+- Resolves: Bug 1748244 - dscreate doesn't sanitize instance name
+
 * Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-2
 - Bump version to 1.4.3.8-2
 - Resolves: Bug 1833350 - Remove cockpit dependancies that are breaking builds