diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata
index 9ce4a90..d5c6a78 100644
--- a/.389-ds-base.metadata
+++ b/.389-ds-base.metadata
@@ -1,3 +1,3 @@
-c69c175a2f27053dffbfefac9c84ff16c7ff4cbf SOURCES/389-ds-base-1.4.3.23.tar.bz2
+9274c7088190993255749ea90bbb770c5c5e0f5c SOURCES/389-ds-base-1.4.3.28.tar.bz2
 9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2
-22b1ef11852864027e184bb4bee56286b855b703 SOURCES/vendor-1.4.3.23-2.tar.gz
+c6875530163f0e217ed2e0e5b768506db3d07447 SOURCES/vendor-1.4.3.28-1.tar.gz
diff --git a/.gitignore b/.gitignore
index 3e96486..5367d88 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,3 @@
-SOURCES/389-ds-base-1.4.3.23.tar.bz2
+SOURCES/389-ds-base-1.4.3.28.tar.bz2
 SOURCES/jemalloc-5.2.1.tar.bz2
-SOURCES/vendor-1.4.3.23-2.tar.gz
+SOURCES/vendor-1.4.3.28-1.tar.gz
diff --git a/SOURCES/0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch b/SOURCES/0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch
new file mode 100644
index 0000000..5990610
--- /dev/null
+++ b/SOURCES/0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch
@@ -0,0 +1,738 @@
+From 67e19da62a9e8958458de54173dcd9bcaf53164d Mon Sep 17 00:00:00 2001
+From: tbordaz <tbordaz@redhat.com>
+Date: Thu, 30 Sep 2021 15:59:40 +0200
+Subject: [PATCH 01/12] Issue 4678 - RFE automatique disable of virtual
+ attribute checking (#4918)
+
+Bug description:
+	Virtual attributes are configured via Roles or COS definitions
+        and registered during initialization of those plugins.
+	Virtual attributes are processed during search evaluation of
+	filter and returned attributes. This processing is expensive
+	and prone to create contention between searches.
+	Use of virtual attribute is not frequent. So many of the
+	deployement process virtual attribute even if there is none.
+
+Fix description:
+	The fix configure the server to ignore virtual attribute by
+        default (nsslapd-ignore-virtual-attrs: on).
+        At startup, if a new virtual attribute is registered or
+        it exists Roles/COS definitions, then the server is
+	configured to process the virtual attributes
+        (nsslapd-ignore-virtual-attrs: off)
+        design: https://www.port389.org/docs/389ds/design/vattr-automatic-toggle.html
+
+relates: https://github.com/389ds/389-ds-base/issues/4678
+
+Reviewed by: William Brown, Simon Pichugin, Mark Reynolds (Thanks !!)
+
+Platforms tested: F34
+---
+ .../tests/suites/config/config_test.py        |  40 +++-
+ dirsrvtests/tests/suites/cos/cos_test.py      |  94 ++++++--
+ dirsrvtests/tests/suites/roles/basic_test.py  | 200 +++++++++++++++++-
+ ldap/servers/plugins/roles/roles_cache.c      |   9 +
+ ldap/servers/slapd/libglobs.c                 |   2 +-
+ ldap/servers/slapd/main.c                     |   2 +
+ ldap/servers/slapd/proto-slap.h               |   1 +
+ ldap/servers/slapd/vattr.c                    | 127 +++++++++++
+ src/lib389/lib389/idm/role.py                 |   4 +
+ 9 files changed, 455 insertions(+), 24 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
+index 2ecff8f98..19232c87d 100644
+--- a/dirsrvtests/tests/suites/config/config_test.py
++++ b/dirsrvtests/tests/suites/config/config_test.py
+@@ -351,7 +351,7 @@ def test_ignore_virtual_attrs(topo):
+     :setup: Standalone instance
+     :steps:
+          1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+-         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF
++         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+          3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs
+          4. Set invalid value for attribute nsslapd-ignore-virtual-attrs
+          5. Set nsslapd-ignore-virtual-attrs=off
+@@ -374,8 +374,8 @@ def test_ignore_virtual_attrs(topo):
+     log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+     assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+ 
+-    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+-    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "off"
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
++    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+ 
+     log.info("Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs")
+     for attribute_value in ['on', 'off', 'ON', 'OFF']:
+@@ -415,6 +415,40 @@ def test_ignore_virtual_attrs(topo):
+     log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on")
+     assert not test_user.present('postalcode', '117')
+ 
++def test_ignore_virtual_attrs_after_restart(topo):
++    """Test nsslapd-ignore-virtual-attrs configuration attribute
++       The attribute is ON by default. If it set to OFF, it keeps
++       its value on restart
++
++    :id: ac368649-4fda-473c-9ef8-e0c728b162af
++    :setup: Standalone instance
++    :steps:
++         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
++         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
++         3. Set nsslapd-ignore-virtual-attrs=off
++         4. restart the instance
++         5. Check the attribute nsslapd-ignore-virtual-attrs is OFF
++    :expectedresults:
++         1. This should be successful
++         2. This should be successful
++         3. This should be successful
++         4. This should be successful
++         5. This should be successful
++    """
++
++    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
++    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
++
++    log.info("Set nsslapd-ignore-virtual-attrs = off")
++    topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'off')
++
++    topo.standalone.restart()
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+ 
+ @pytest.mark.bz918694
+ @pytest.mark.ds408
+diff --git a/dirsrvtests/tests/suites/cos/cos_test.py b/dirsrvtests/tests/suites/cos/cos_test.py
+index d6a498c73..d1f99f96f 100644
+--- a/dirsrvtests/tests/suites/cos/cos_test.py
++++ b/dirsrvtests/tests/suites/cos/cos_test.py
+@@ -6,6 +6,8 @@
+ # See LICENSE for details.
+ # --- END COPYRIGHT BLOCK ---
+ 
++import logging
++import time
+ import pytest, os, ldap
+ from lib389.cos import  CosClassicDefinition, CosClassicDefinitions, CosTemplate
+ from lib389._constants import DEFAULT_SUFFIX
+@@ -14,26 +16,37 @@ from lib389.idm.role import FilteredRoles
+ from lib389.idm.nscontainer import nsContainer
+ from lib389.idm.user import UserAccount
+ 
++logging.getLogger(__name__).setLevel(logging.INFO)
++log = logging.getLogger(__name__)
++
+ pytestmark = pytest.mark.tier1
++@pytest.fixture(scope="function")
++def reset_ignore_vattr(topo, request):
++    default_ignore_vattr_value = topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs')
++    def fin():
++        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', default_ignore_vattr_value)
+ 
+-def test_positive(topo):
+-    """
+-        :id: a5a74235-597f-4fe8-8c38-826860927472
+-        :setup: server
+-        :steps:
+-            1. Add filter role entry
+-            2. Add ns container
+-            3. Add cos template
+-            4. Add CosClassic Definition
+-            5. Cos entries should be added and searchable
+-            6. employeeType attribute should be there in user entry as per the cos plugin property
+-        :expectedresults:
+-            1. Operation should success
+-            2. Operation should success
+-            3. Operation should success
+-            4. Operation should success
+-            5. Operation should success
+-            6. Operation should success
++    request.addfinalizer(fin)
++
++def test_positive(topo, reset_ignore_vattr):
++    """CoS positive tests
++
++    :id: a5a74235-597f-4fe8-8c38-826860927472
++    :setup: server
++    :steps:
++        1. Add filter role entry
++        2. Add ns container
++        3. Add cos template
++        4. Add CosClassic Definition
++        5. Cos entries should be added and searchable
++        6. employeeType attribute should be there in user entry as per the cos plugin property
++    :expectedresults:
++        1. Operation should success
++        2. Operation should success
++        3. Operation should success
++        4. Operation should success
++        5. Operation should success
++        6. Operation should success
+     """
+     # Adding ns filter role
+     roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
+@@ -77,7 +90,52 @@ def test_positive(topo):
+ 
+     #  CoS definition entry's cosSpecifier attribute specifies the employeeType attribute
+     assert user.present('employeeType')
++    cosdef.delete()
++
++def test_vattr_on_cos_definition(topo, reset_ignore_vattr):
++    """Test nsslapd-ignore-virtual-attrs configuration attribute
++       The attribute is ON by default. If a cos definition is
++       added it is moved to OFF
++
++    :id: e7ef5254-386f-4362-bbb4-9409f3f51b08
++    :setup: Standalone instance
++    :steps:
++         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
++         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
++         3. Create a cos definition for employeeType
++         4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing)
++         5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs
++    :expectedresults:
++         1. This should be successful
++         2. This should be successful
++         3. This should be successful
++         4. This should be successful
++         5. This should be successful
++    """
++
++    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
++    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
++
++    # creating CosClassicDefinition
++    log.info("Create a cos definition")
++    properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX),
++                  'cosAttribute': 'employeeType',
++                  'cosSpecifier': 'nsrole',
++                  'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'}
++    cosdef = CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\
++        .create(properties=properties)
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
++    time.sleep(2)
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+ 
++    topo.standalone.stop()
++    assert topo.standalone.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'")
++    topo.standalone.start()
++    cosdef.delete()
+ 
+ if __name__ == "__main__":
+     CURRENT_FILE = os.path.realpath(__file__)
+diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
+index 47a531794..bec3aedfc 100644
+--- a/dirsrvtests/tests/suites/roles/basic_test.py
++++ b/dirsrvtests/tests/suites/roles/basic_test.py
+@@ -11,6 +11,8 @@
+ Importing necessary Modules.
+ """
+ 
++import logging
++import time
+ import os
+ import pytest
+ 
+@@ -22,6 +24,9 @@ from lib389.topologies import topology_st as topo
+ from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles
+ from lib389.idm.domain import Domain
+ 
++logging.getLogger(__name__).setLevel(logging.INFO)
++log = logging.getLogger(__name__)
++
+ pytestmark = pytest.mark.tier1
+ 
+ DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX)
+@@ -35,7 +40,7 @@ FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE)
+ FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE)
+ 
+ 
+-def test_filterrole(topo):
++def test_filterrole(topo, request):
+     """Test Filter Role
+ 
+     :id: 8ada4064-786b-11e8-8634-8c16451d917b
+@@ -136,8 +141,20 @@ def test_filterrole(topo):
+                   SALES_OU, DNBASE]:
+         UserAccount(topo.standalone, dn_dn).delete()
+ 
++    def fin():
++        topo.standalone.restart()
++        try:
++            filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
++            for i in filtered_roles.list():
++                i.delete()
++        except:
++            pass
++        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
++
++    request.addfinalizer(fin)
++
+ 
+-def test_managedrole(topo):
++def test_managedrole(topo, request):
+     """Test Managed Role
+ 
+     :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b
+@@ -209,6 +226,16 @@ def test_managedrole(topo):
+     for i in roles.list():
+         i.delete()
+ 
++    def fin():
++        topo.standalone.restart()
++        try:
++            role = ManagedRoles(topo.standalone, DEFAULT_SUFFIX).get('ROLE1')
++            role.delete()
++        except:
++            pass
++        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
++
++    request.addfinalizer(fin)
+ 
+ @pytest.fixture(scope="function")
+ def _final(request, topo):
+@@ -220,6 +247,7 @@ def _final(request, topo):
+     def finofaci():
+         """
+         Removes and Restores ACIs and other users after the test.
++        And restore nsslapd-ignore-virtual-attrs to default
+         """
+         domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+         domain.remove_all('aci')
+@@ -234,6 +262,8 @@ def _final(request, topo):
+         for i in aci_list:
+             domain.add("aci", i)
+ 
++        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
++
+     request.addfinalizer(finofaci)
+ 
+ 
+@@ -296,6 +326,172 @@ def test_nestedrole(topo, _final):
+     conn = users.get('test_user_3').bind(PW_DM)
+     assert UserAccounts(conn, DEFAULT_SUFFIX).list()
+ 
++def test_vattr_on_filtered_role(topo, request):
++    """Test nsslapd-ignore-virtual-attrs configuration attribute
++       The attribute is ON by default. If a filtered role is
++       added it is moved to OFF
++
++    :id: 88b3ad3c-f39a-4eb7-a8c9-07c685f11908
++    :setup: Standalone instance
++    :steps:
++         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
++         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
++         3. Create a filtered role
++         4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
++         5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
++    :expectedresults:
++         1. This should be successful
++         2. This should be successful
++         3. This should be successful
++         4. This should be successful
++         5. This should be successful
++    """
++
++    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
++    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
++
++    log.info("Create a filtered role")
++    try:
++        Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
++    except:
++        pass
++    roles = FilteredRoles(topo.standalone, DNBASE)
++    roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
++
++    topo.standalone.stop()
++    assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
++
++    def fin():
++        topo.standalone.restart()
++        try:
++            filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
++            for i in filtered_roles.list():
++                i.delete()
++        except:
++            pass
++        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
++
++    request.addfinalizer(fin)
++
++def test_vattr_on_filtered_role_restart(topo, request):
++    """Test nsslapd-ignore-virtual-attrs configuration attribute
++    If it exists a filtered role definition at restart then
++    nsslapd-ignore-virtual-attrs should be set to 'off'
++
++    :id: 972183f7-d18f-40e0-94ab-580e7b7d78d0
++    :setup: Standalone instance
++    :steps:
++         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
++         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
++         3. Create a filtered role
++         4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
++         5. restart the instance
++         6. Check the presence of virtual attribute is detected
++         7. Check the value of nsslapd-ignore-virtual-attrs should be OFF
++    :expectedresults:
++         1. This should be successful
++         2. This should be successful
++         3. This should be successful
++         4. This should be successful
++         5. This should be successful
++         6. This should be successful
++         7. This should be successful
++    """
++
++    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
++    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
++
++    log.info("Create a filtered role")
++    try:
++        Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
++    except:
++        pass
++    roles = FilteredRoles(topo.standalone, DNBASE)
++    roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
++
++    
++    log.info("Check the virtual attribute definition is found (after a required delay)")
++    topo.standalone.restart()
++    time.sleep(5)
++    assert topo.standalone.searchErrorsLog("Found a role/cos definition in")
++    assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
++
++    def fin():
++        topo.standalone.restart()
++        try:
++            filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
++            for i in filtered_roles.list():
++                i.delete()
++        except:
++            pass
++        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
++
++    request.addfinalizer(fin)
++
++
++def test_vattr_on_managed_role(topo, request):
++    """Test nsslapd-ignore-virtual-attrs configuration attribute
++       The attribute is ON by default. If a managed role is
++       added it is moved to OFF
++
++    :id: 664b722d-c1ea-41e4-8f6c-f9c87a212346
++    :setup: Standalone instance
++    :steps:
++         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
++         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
++         3. Create a managed role
++         4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
++         5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
++    :expectedresults:
++         1. This should be successful
++         2. This should be successful
++         3. This should be successful
++         4. This should be successful
++         5. This should be successful
++    """
++
++    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
++    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
++
++    log.info("Create a managed role")
++    roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
++    role = roles.create(properties={"cn": 'ROLE1'})
++
++    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
++    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
++
++    topo.standalone.stop()
++    assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
++
++    def fin():
++        topo.standalone.restart()
++        try:
++            filtered_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
++            for i in filtered_roles.list():
++                i.delete()
++        except:
++            pass
++        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
++
++    request.addfinalizer(fin)
+ 
+ if __name__ == "__main__":
+     CURRENT_FILE = os.path.realpath(__file__)
+diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
+index 3d076a4cb..cd00e0aba 100644
+--- a/ldap/servers/plugins/roles/roles_cache.c
++++ b/ldap/servers/plugins/roles/roles_cache.c
+@@ -530,6 +530,15 @@ roles_cache_trigger_update_role(char *dn, Slapi_Entry *roles_entry, Slapi_DN *be
+     }
+ 
+     slapi_rwlock_unlock(global_lock);
++    {
++        /* A role definition has been updated, enable vattr handling */
++        char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
++        errorbuf[0] = '\0';
++        config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
++        slapi_log_err(SLAPI_LOG_INFO,
++                      "roles_cache_trigger_update_role",
++                      "Because of virtual attribute definition (role), %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
++    }
+ 
+     slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "<-- roles_cache_trigger_update_role: %p \n", roles_list);
+ }
+diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
+index 2ea4cd760..f6dacce30 100644
+--- a/ldap/servers/slapd/libglobs.c
++++ b/ldap/servers/slapd/libglobs.c
+@@ -1803,7 +1803,7 @@ FrontendConfig_init(void)
+     init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_ON;
+     cfg->ndn_cache_max_size = SLAPD_DEFAULT_NDN_SIZE;
+     init_sasl_mapping_fallback = cfg->sasl_mapping_fallback = LDAP_OFF;
+-    init_ignore_vattrs = cfg->ignore_vattrs = LDAP_OFF;
++    init_ignore_vattrs = cfg->ignore_vattrs = LDAP_ON;
+     cfg->sasl_max_bufsize = SLAPD_DEFAULT_SASL_MAXBUFSIZE;
+     cfg->unhashed_pw_switch = SLAPD_DEFAULT_UNHASHED_PW_SWITCH;
+     init_return_orig_type = cfg->return_orig_type = LDAP_OFF;
+diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
+index 4931a4ca4..61ed40b7d 100644
+--- a/ldap/servers/slapd/main.c
++++ b/ldap/servers/slapd/main.c
+@@ -1042,6 +1042,8 @@ main(int argc, char **argv)
+         eq_start(); /* must be done after plugins started - DEPRECATED */
+         eq_start_rel(); /* must be done after plugins started */
+ 
++        vattr_check(); /* Check if it exists virtual attribute definitions */
++
+ #ifdef HPUX10
+         /* HPUX linker voodoo */
+         if (collation_init == NULL) {
+diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
+index c143f3772..442a621aa 100644
+--- a/ldap/servers/slapd/proto-slap.h
++++ b/ldap/servers/slapd/proto-slap.h
+@@ -1462,6 +1462,7 @@ void subentry_create_filter(Slapi_Filter **filter);
+  */
+ void vattr_init(void);
+ void vattr_cleanup(void);
++void vattr_check(void);
+ 
+ /*
+  * slapd_plhash.c - supplement to NSPR plhash
+diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
+index 09dab6ecf..24750a57c 100644
+--- a/ldap/servers/slapd/vattr.c
++++ b/ldap/servers/slapd/vattr.c
+@@ -64,6 +64,10 @@
+ #define SOURCEFILE "vattr.c"
+ static char *sourcefile = SOURCEFILE;
+ 
++/* stolen from roles_cache.h, must remain in sync */
++#define NSROLEATTR "nsRole"
++static Slapi_Eq_Context vattr_check_ctx = {0};
++
+ /* Define only for module test code */
+ /* #define VATTR_TEST_CODE */
+ 
+@@ -130,6 +134,112 @@ vattr_cleanup()
+ {
+     /* We need to free and remove anything that was inserted first */
+     vattr_map_destroy();
++    slapi_eq_cancel_rel(vattr_check_ctx);
++}
++
++static void
++vattr_check_thread(void *arg)
++{
++    Slapi_Backend *be = NULL;
++    char *cookie = NULL;
++    Slapi_DN *base_sdn = NULL;
++    Slapi_PBlock *search_pb = NULL;
++    Slapi_Entry **entries = NULL;
++    int32_t rc;
++    int32_t check_suffix; /* used to skip suffixes in ignored_backend */
++    PRBool exist_vattr_definition = PR_FALSE;
++    char *ignored_backend[5] = {"cn=config", "cn=schema", "cn=monitor", "cn=changelog", NULL}; /* suffixes to ignore */
++    char *suffix;
++    int ignore_vattrs;
++
++    ignore_vattrs = config_get_ignore_vattrs();
++
++    if (!ignore_vattrs) {
++        /* Nothing to do more, we are already evaluating virtual attribute */
++        return;
++    }
++
++    search_pb = slapi_pblock_new();
++    be = slapi_get_first_backend(&cookie);
++    while (be && !exist_vattr_definition && !slapi_is_shutting_down()) {
++        base_sdn = (Slapi_DN *) slapi_be_getsuffix(be, 0);
++        suffix = (char *) slapi_sdn_get_dn(base_sdn);
++
++        if (suffix) {
++            /* First check that we need to check that suffix */
++            check_suffix = 1;
++            for (size_t i = 0; ignored_backend[i]; i++) {
++                if (strcasecmp(suffix, ignored_backend[i]) == 0) {
++                    check_suffix = 0;
++                    break;
++                }
++            }
++
++            /* search for a role or cos definition */
++            if (check_suffix) {
++                slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn),
++                        LDAP_SCOPE_SUBTREE, "(&(objectclass=ldapsubentry)(|(objectclass=nsRoleDefinition)(objectclass=cosSuperDefinition)))",
++                        NULL, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0);
++                slapi_search_internal_pb(search_pb);
++                slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
++
++                if (rc == LDAP_SUCCESS) {
++                    slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
++                    if (entries && entries[0]) {
++                        /* it exists at least a cos or role definition */
++                        exist_vattr_definition = PR_TRUE;
++                        slapi_log_err(SLAPI_LOG_INFO,
++                                "vattr_check_thread",
++                                "Found a role/cos definition in %s\n", slapi_entry_get_dn(entries[0]));
++                    } else {
++                        slapi_log_err(SLAPI_LOG_INFO,
++                                "vattr_check_thread",
++                                "No role/cos definition in %s\n", slapi_sdn_get_dn(base_sdn));
++                    }
++                }
++                slapi_free_search_results_internal(search_pb);
++            } /* check_suffix */
++        } /* suffix */
++        be = (backend *) slapi_get_next_backend(cookie);
++    }
++    slapi_pblock_destroy(search_pb);
++    slapi_ch_free_string(&cookie);
++
++    /* Now if a virtual attribute is defined, then CONFIG_IGNORE_VATTRS -> off */
++    if (exist_vattr_definition) {
++        char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
++        errorbuf[0] = '\0';
++        config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
++        slapi_log_err(SLAPI_LOG_INFO,
++                      "vattr_check_thread",
++                      "Because of virtual attribute definition, %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
++    }
++}
++static void
++vattr_check_schedule_once(time_t when __attribute__((unused)), void *arg)
++{
++    if (PR_CreateThread(PR_USER_THREAD,
++                        vattr_check_thread,
++                        (void *) arg,
++                        PR_PRIORITY_NORMAL,
++                        PR_GLOBAL_THREAD,
++                        PR_UNJOINABLE_THREAD,
++                        SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) {
++        slapi_log_err(SLAPI_LOG_ERR,
++                      "vattr_check_schedule_once",
++                      "Fails to check if %s needs to be toggled to FALSE\n", CONFIG_IGNORE_VATTRS);
++    }
++}
++#define VATTR_CHECK_DELAY 3
++void
++vattr_check()
++{
++    /* Schedule running a callback that will create a thread
++     * but make sure it is called a first thing when event loop is created */
++    time_t now;
++
++    now = slapi_current_rel_time_t();
++    vattr_check_ctx = slapi_eq_once_rel(vattr_check_schedule_once, NULL, now + VATTR_CHECK_DELAY);
+ }
+ 
+ /* The public interface functions start here */
+@@ -1631,6 +1741,9 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
+     char *type_to_add;
+     int free_type_to_add = 0;
+     Slapi_DN original_dn;
++    int ignore_vattrs;
++
++    ignore_vattrs = config_get_ignore_vattrs();
+ 
+     slapi_sdn_init(&original_dn);
+ 
+@@ -1676,6 +1789,20 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
+     if (free_type_to_add) {
+         slapi_ch_free((void **)&type_to_add);
+     }
++    if (ignore_vattrs && strcasecmp(type_name_to_register, NSROLEATTR)) {
++        /* A new virtual attribute is registered.
++         * This new vattr being *different* than the default roles vattr 'nsRole'
++         * It is time to allow vattr lookup
++         */
++        char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
++        errorbuf[0] = '\0';
++        config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
++        slapi_log_err(SLAPI_LOG_INFO,
++                      "slapi_vattrspi_regattr",
++                      "Because %s is a new registered virtual attribute , %s was set to 'off'\n",
++                      type_name_to_register,
++                      CONFIG_IGNORE_VATTRS);
++    }
+ 
+     return ret;
+ }
+diff --git a/src/lib389/lib389/idm/role.py b/src/lib389/lib389/idm/role.py
+index fe91aab6f..9a2bff3d6 100644
+--- a/src/lib389/lib389/idm/role.py
++++ b/src/lib389/lib389/idm/role.py
+@@ -252,6 +252,8 @@ class FilteredRole(Role):
+         self._rdn_attribute = 'cn'
+         self._create_objectclasses = ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']
+ 
++        self._protected = False
++
+ 
+ 
+ class FilteredRoles(Roles):
+@@ -285,6 +287,7 @@ class ManagedRole(Role):
+         self._rdn_attribute = 'cn'
+         self._create_objectclasses = ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']
+ 
++        self._protected = False
+ 
+ class ManagedRoles(Roles):
+     """DSLdapObjects that represents all Managed Roles entries
+@@ -320,6 +323,7 @@ class NestedRole(Role):
+         self._rdn_attribute = 'cn'
+         self._create_objectclasses = ['nsComplexRoleDefinition', 'nsNestedRoleDefinition']
+ 
++        self._protected = False
+ 
+ class NestedRoles(Roles):
+     """DSLdapObjects that represents all NestedRoles entries in suffix.
+-- 
+2.31.1
+
diff --git a/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch b/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch
deleted file mode 100644
index 1400b43..0000000
--- a/SOURCES/0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch
+++ /dev/null
@@ -1,1370 +0,0 @@
-From 5d730f7e9f1e857bc886556db0229607b8d536d2 Mon Sep 17 00:00:00 2001
-From: tbordaz <tbordaz@redhat.com>
-Date: Thu, 6 May 2021 18:54:20 +0200
-Subject: [PATCH 01/12] Issue 4747 - Remove unstable/unstatus tests from PRCI
- (#4748)
-
-Bug description:
-	Some tests (17) in the tests suite (dirsrvtest/tests/suites)
-	are failing although there is no regression.
-	It needs (long) investigations to status if failures
-	are due to a bug in the tests or in DS core.
-	Until those investigations are completes, test suites
-	loose a large part of its value to detect regression.
-	Indeed those failing tests may hide a real regression.
-
-Fix description:
-	Flag failing tests with pytest.mark.flaky(max_runs=2, min_passes=1)
-	Additional action will be to create upstream 17 ticket to
-	status on each failing tests
-
-relates: https://github.com/389ds/389-ds-base/issues/4747
-
-Reviewed by: Simon Pichugin, Viktor Ashirov (many thanks for your
-reviews and help)
-
-Platforms tested: F33
----
- .github/workflows/pytest.yml                  |  84 +++++
- dirsrvtests/tests/suites/acl/keywords_test.py |  16 +-
- .../tests/suites/clu/dsctl_acceptance_test.py |  56 ---
- .../tests/suites/clu/repl_monitor_test.py     |   2 +
- .../dynamic_plugins/dynamic_plugins_test.py   |   8 +-
- .../suites/fourwaymmr/fourwaymmr_test.py      |   3 +-
- .../suites/healthcheck/health_config_test.py  |   1 +
- .../suites/healthcheck/health_sync_test.py    |   2 +
- .../tests/suites/import/import_test.py        |  23 +-
- .../tests/suites/indexes/regression_test.py   |  63 ++++
- .../paged_results/paged_results_test.py       |   3 +-
- .../tests/suites/password/regression_test.py  |   2 +
- .../tests/suites/plugins/accpol_test.py       |  20 +-
- .../suites/plugins/managed_entry_test.py      | 351 ++++++++++++++++++
- .../tests/suites/plugins/memberof_test.py     |   3 +-
- .../suites/replication/cleanallruv_test.py    |   8 +-
- .../suites/replication/encryption_cl5_test.py |   8 +-
- .../tests/suites/retrocl/basic_test.py        | 292 ---------------
- 18 files changed, 576 insertions(+), 369 deletions(-)
- create mode 100644 .github/workflows/pytest.yml
- delete mode 100644 dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py
- create mode 100644 dirsrvtests/tests/suites/plugins/managed_entry_test.py
- delete mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py
-
-diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
-new file mode 100644
-index 000000000..015794d96
---- /dev/null
-+++ b/.github/workflows/pytest.yml
-@@ -0,0 +1,84 @@
-+name: Test
-+
-+on: [push, pull_request]
-+
-+jobs:
-+  build:
-+    name: Build
-+    runs-on: ubuntu-20.04
-+    container:
-+      image: quay.io/389ds/ci-images:test
-+    outputs:
-+        matrix: ${{ steps.set-matrix.outputs.matrix }}
-+    steps:
-+      - name: Checkout
-+        uses: actions/checkout@v2
-+
-+      - name: Get a list of all test suites
-+        id: set-matrix
-+        run: echo "::set-output name=matrix::$(python3 .github/scripts/generate_matrix.py)"
-+
-+      - name: Build RPMs
-+        run: cd $GITHUB_WORKSPACE && SKIP_AUDIT_CI=1 make -f rpm.mk dist-bz2 rpms
-+
-+      - name: Tar build artifacts
-+        run: tar -cvf dist.tar dist/
-+
-+      - name: Upload RPMs
-+        uses: actions/upload-artifact@v2
-+        with:
-+          name: rpms
-+          path: dist.tar
-+
-+  test:
-+    name: Test
-+    runs-on: ubuntu-20.04
-+    needs: build
-+    strategy:
-+      fail-fast: false
-+      matrix: ${{ fromJson(needs.build.outputs.matrix) }}
-+
-+    steps:
-+    - name: Checkout
-+      uses: actions/checkout@v2
-+
-+    - name: Install dependencies
-+      run: |
-+        sudo apt update -y
-+        sudo apt install -y docker.io containerd runc
-+
-+        sudo cp .github/daemon.json /etc/docker/daemon.json
-+
-+        sudo systemctl unmask docker
-+        sudo systemctl start docker
-+
-+    - name: Download RPMs
-+      uses: actions/download-artifact@master
-+      with:
-+        name: rpms
-+    
-+    - name: Extract RPMs
-+      run: tar xvf dist.tar
-+
-+    - name: Run pytest in a container
-+      run: |
-+        set -x
-+        CID=$(sudo docker run -d -h server.example.com --privileged --rm -v /sys/fs/cgroup:/sys/fs/cgroup:rw,rslave -v ${PWD}:/workspace quay.io/389ds/ci-images:test)
-+        sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm"
-+        sudo docker exec $CID py.test  --suppress-no-test-exit-code  -m "not flaky" --junit-xml=pytest.xml -v dirsrvtests/tests/suites/${{ matrix.suite }}
-+
-+    - name: Make the results file readable by all
-+      if: always()
-+      run:
-+        sudo chmod -f a+r pytest.xml
-+
-+    - name: Sanitize filename
-+      run: echo "PYTEST_SUITE=$(echo ${{ matrix.suite }} | sed -e 's#\/#-#g')" >> $GITHUB_ENV
-+      
-+    - name: Upload pytest test results
-+      if: always()
-+      uses: actions/upload-artifact@v2
-+      with:
-+        name: pytest-${{ env.PYTEST_SUITE }}
-+        path: pytest.xml
-+ 
-diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py
-index 0174152e3..c5e989f3b 100644
---- a/dirsrvtests/tests/suites/acl/keywords_test.py
-+++ b/dirsrvtests/tests/suites/acl/keywords_test.py
-@@ -216,7 +216,8 @@ def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_us
-     with pytest.raises(ldap.INSUFFICIENT_ACCESS):
-         org.replace("seeAlso", "cn=1")
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_user_can_access_the_data_when_connecting_from_any_machine(
-         topo, add_user, aci_of_user
- ):
-@@ -245,6 +246,8 @@ def test_user_can_access_the_data_when_connecting_from_any_machine(
-     OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1")
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only(
-         topo, add_user, aci_of_user
- ):
-@@ -276,7 +279,8 @@ def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only(
-     # Perform Operation
-     OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1")
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_user_can_access_the_data_when_connecting_from_some_network_only(
-         topo, add_user, aci_of_user
- ):
-@@ -306,7 +310,8 @@ def test_user_can_access_the_data_when_connecting_from_some_network_only(
-     # Perform Operation
-     OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1")
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_from_an_unauthorized_network(topo, add_user, aci_of_user):
-     """User cannot access the data when connecting from an unauthorized network as per the ACI.
- 
-@@ -332,7 +337,8 @@ def test_from_an_unauthorized_network(topo, add_user, aci_of_user):
-     # Perform Operation
-     OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1")
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2(
-         topo, add_user, aci_of_user):
-     """User cannot access the data when connecting from an unauthorized network as per the ACI.
-@@ -418,6 +424,8 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user):
-     with pytest.raises(ldap.INSUFFICIENT_ACCESS):
-         org.replace("seeAlso", "cn=1")
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- @pytest.mark.ds50378
- @pytest.mark.bz1710848
- @pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"])
-diff --git a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py b/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py
-deleted file mode 100644
-index a0f89defd..000000000
---- a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py
-+++ /dev/null
-@@ -1,56 +0,0 @@
--# --- BEGIN COPYRIGHT BLOCK ---
--# Copyright (C) 2021 Red Hat, Inc.
--# All rights reserved.
--#
--# License: GPL (version 3 or any later version).
--# See LICENSE for details.
--# --- END COPYRIGHT BLOCK ---
--
--import logging
--import pytest
--import os
--from lib389._constants import *
--from lib389.topologies import topology_st as topo
--
--log = logging.getLogger(__name__)
--
--
--def test_custom_path(topo):
--    """Test that a custom path, backup directory, is correctly used by lib389
--    when the server is stopped.
--
--    :id: 8659e209-ee83-477e-8183-1d2f555669ea
--    :setup: Standalone Instance
--    :steps:
--        1. Get the LDIF directory
--        2. Change the server's backup directory to the LDIF directory
--        3. Stop the server, and perform a backup
--        4. Backup was written to LDIF directory
--    :expectedresults:
--        1. Success
--        2. Success
--        3. Success
--        4. Success
--    """
--
--    # Get LDIF dir
--    ldif_dir = topo.standalone.get_ldif_dir()
--
--    # Set backup directory to LDIF directory
--    topo.standalone.config.replace('nsslapd-bakdir', ldif_dir)
--
--    # Stop the server and take a backup
--    topo.standalone.stop()
--    topo.standalone.db2bak(None)
--
--    # Verify backup was written to LDIF directory
--    backups = os.listdir(ldif_dir)
--    assert len(backups)
--
--
--if __name__ == '__main__':
--    # Run isolated
--    # -s for DEBUG mode
--    CURRENT_FILE = os.path.realpath(__file__)
--    pytest.main(["-s", CURRENT_FILE])
--
-diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
-index 9428edb26..3cf6343c8 100644
---- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
-+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
-@@ -90,6 +90,8 @@ def get_hostnames_from_log(port1, port2):
-         host_m2 = match.group(2)
-     return (host_m1, host_m2)
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- @pytest.mark.ds50545
- @pytest.mark.bz1739718
- @pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented")
-diff --git a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py
-index b61daed74..7558cc03d 100644
---- a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py
-+++ b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py
-@@ -68,7 +68,8 @@ def check_replicas(topology_m2):
- 
-     log.info('Data is consistent across the replicas.\n')
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_acceptance(topology_m2):
-     """Exercise each plugin and its main features, while
-     changing the configuration without restarting the server.
-@@ -140,7 +141,8 @@ def test_acceptance(topology_m2):
-     ############################################################################
-     check_replicas(topology_m2)
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_memory_corruption(topology_m2):
-     """Check the plugins for memory corruption issues while
-     dynamic plugins option is enabled
-@@ -242,6 +244,8 @@ def test_memory_corruption(topology_m2):
-     ############################################################################
-     check_replicas(topology_m2)
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- @pytest.mark.tier2
- def test_stress(topology_m2):
-     """Test plugins while under a big load. Perform the test 5 times
-diff --git a/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py b/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py
-index 5b0754a2e..c5a746ebb 100644
---- a/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py
-+++ b/dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py
-@@ -144,7 +144,8 @@ def test_delete_a_few_entries_in_m4(topo_m4, _cleanupentris):
-         topo_m4.ms["supplier4"], topo_m4.ms["supplier3"], 30
-     )
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_replicated_multivalued_entries(topo_m4):
-     """
-     Replicated multivalued entries are ordered the same way on all consumers
-diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
-index 3d102e859..f470c05c6 100644
---- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
-+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
-@@ -337,6 +337,7 @@ def test_healthcheck_low_disk_space(topology_st):
-     os.remove(file)
- 
- 
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- @pytest.mark.ds50791
- @pytest.mark.bz1843567
- @pytest.mark.xfail(ds_is_older("1.4.3.8"), reason="Not implemented")
-diff --git a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py
-index 75bbfd35c..74df1b322 100644
---- a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py
-+++ b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py
-@@ -70,6 +70,8 @@ def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searc
- @pytest.mark.ds50873
- @pytest.mark.bz1685160
- @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_healthcheck_replication_out_of_sync_not_broken(topology_m3):
-     """Check if HealthCheck returns DSREPLLE0003 code
- 
-diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
-index defe447d5..119b097f1 100644
---- a/dirsrvtests/tests/suites/import/import_test.py
-+++ b/dirsrvtests/tests/suites/import/import_test.py
-@@ -14,6 +14,7 @@ import os
- import pytest
- import time
- import glob
-+import logging
- from lib389.topologies import topology_st as topo
- from lib389._constants import DEFAULT_SUFFIX, TaskWarning
- from lib389.dbgen import dbgen_users
-@@ -28,6 +29,12 @@ from lib389.idm.account import Accounts
- 
- pytestmark = pytest.mark.tier1
- 
-+DEBUGGING = os.getenv("DEBUGGING", default=False)
-+if DEBUGGING:
-+    logging.getLogger(__name__).setLevel(logging.DEBUG)
-+else:
-+    logging.getLogger(__name__).setLevel(logging.INFO)
-+log = logging.getLogger(__name__)
- 
- def _generate_ldif(topo, no_no):
-     """
-@@ -349,7 +356,8 @@ def _toggle_private_import_mem(request, topo):
-             ('nsslapd-db-private-import-mem', 'off'))
-     request.addfinalizer(finofaci)
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean):
-     """With nsslapd-db-private-import-mem: on is faster import.
- 
-@@ -381,16 +389,19 @@ def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean):
-     # Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0
-     config = LDBMConfig(topo.standalone)
-     # Measure offline import time duration total_time1
--    total_time1 = _import_offline(topo, 20)
-+    total_time1 = _import_offline(topo, 1000)
-     # Now nsslapd-db-private-import-mem:off
-     config.replace('nsslapd-db-private-import-mem', 'off')
-     accounts = Accounts(topo.standalone, DEFAULT_SUFFIX)
-     for i in accounts.filter('(uid=*)'):
-         UserAccount(topo.standalone, i.dn).delete()
-     # Measure offline import time duration total_time2
--    total_time2 = _import_offline(topo, 20)
-+    total_time2 = _import_offline(topo, 1000)
-     # total_time1 < total_time2
-+    log.info("total_time1 = %f" % total_time1)
-+    log.info("total_time2 = %f" % total_time2)
-     assert total_time1 < total_time2
-+
-     # Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1
-     config.replace_many(
-         ('nsslapd-db-private-import-mem', 'on'),
-@@ -398,14 +409,16 @@ def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean):
-     for i in accounts.filter('(uid=*)'):
-         UserAccount(topo.standalone, i.dn).delete()
-     # Measure offline import time duration total_time1
--    total_time1 = _import_offline(topo, 20)
-+    total_time1 = _import_offline(topo, 1000)
-     # Now nsslapd-db-private-import-mem:off
-     config.replace('nsslapd-db-private-import-mem', 'off')
-     for i in accounts.filter('(uid=*)'):
-         UserAccount(topo.standalone, i.dn).delete()
-     # Measure offline import time duration total_time2
--    total_time2 = _import_offline(topo, 20)
-+    total_time2 = _import_offline(topo, 1000)
-     # total_time1 < total_time2
-+    log.info("toral_time1 = %f" % total_time1)
-+    log.info("total_time2 = %f" % total_time2)
-     assert total_time1 < total_time2
- 
- 
-diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
-index 1a71f16e9..ed0c8885f 100644
---- a/dirsrvtests/tests/suites/indexes/regression_test.py
-+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
-@@ -19,6 +19,68 @@ from lib389.topologies import topology_st as topo
- pytestmark = pytest.mark.tier1
- 
- 
-+@pytest.fixture(scope="function")
-+def add_a_group_with_users(request, topo):
-+    """
-+    Add a group and users, which are members of this group.
-+    """
-+    groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn=None)
-+    group = groups.create(properties={'cn': 'test_group'})
-+    users_list = []
-+    users_num = 100
-+    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None)
-+    for num in range(users_num):
-+        USER_NAME = f'test_{num}'
-+        user = users.create(properties={
-+            'uid': USER_NAME,
-+            'sn': USER_NAME,
-+            'cn': USER_NAME,
-+            'uidNumber': f'{num}',
-+            'gidNumber': f'{num}',
-+            'homeDirectory': f'/home/{USER_NAME}'
-+        })
-+        users_list.append(user)
-+        group.add_member(user.dn)
-+
-+    def fin():
-+        """
-+        Removes group and users.
-+        """
-+        # If the server crashed, start it again to do the cleanup
-+        if not topo.standalone.status():
-+            topo.standalone.start()
-+        for user in users_list:
-+            user.delete()
-+        group.delete()
-+
-+    request.addfinalizer(fin)
-+
-+
-+@pytest.fixture(scope="function")
-+def set_small_idlistscanlimit(request, topo):
-+    """
-+    Set nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer
-+    """
-+    db_cfg = DatabaseConfig(topo.standalone)
-+    old_idlistscanlimit = db_cfg.get_attr_vals_utf8('nsslapd-idlistscanlimit')
-+    db_cfg.set([('nsslapd-idlistscanlimit', '100')])
-+    topo.standalone.restart()
-+
-+    def fin():
-+        """
-+        Set nsslapd-idlistscanlimit back to the default value
-+        """
-+        # If the server crashed, start it again to do the cleanup
-+        if not topo.standalone.status():
-+            topo.standalone.start()
-+        db_cfg.set([('nsslapd-idlistscanlimit', old_idlistscanlimit)])
-+        topo.standalone.restart()
-+
-+    request.addfinalizer(fin)
-+
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
-+@pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented")
- def test_reindex_task_creates_abandoned_index_file(topo):
-     """
-     Recreating an index for the same attribute but changing
-@@ -123,3 +185,4 @@ if __name__ == "__main__":
-     # -s for DEBUG mode
-     CURRENT_FILE = os.path.realpath(__file__)
-     pytest.main("-s %s" % CURRENT_FILE)
-+
-diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
-index 9fdceb165..0b45b7d96 100644
---- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
-+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
-@@ -506,7 +506,8 @@ def test_search_with_timelimit(topology_st, create_user):
-     finally:
-         del_users(users_list)
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- @pytest.mark.parametrize('aci_subject',
-                          ('dns = "{}"'.format(HOSTNAME),
-                           'ip = "{}"'.format(IP_ADDRESS)))
-diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
-index 251834421..8f1facb6d 100644
---- a/dirsrvtests/tests/suites/password/regression_test.py
-+++ b/dirsrvtests/tests/suites/password/regression_test.py
-@@ -215,6 +215,8 @@ def test_global_vs_local(topo, passw_policy, create_user, user_pasw):
-     # reset password
-     create_user.set('userPassword', PASSWORD)
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- @pytest.mark.ds49789
- def test_unhashed_pw_switch(topo_supplier):
-     """Check that nsslapd-unhashed-pw-switch works corrently
-diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py
-index 73e2e54d1..77975c747 100644
---- a/dirsrvtests/tests/suites/plugins/accpol_test.py
-+++ b/dirsrvtests/tests/suites/plugins/accpol_test.py
-@@ -520,7 +520,8 @@ def test_glinact_limit(topology_st, accpol_global):
-     modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12')
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_glnologin_attr(topology_st, accpol_global):
-     """Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present
- 
-@@ -610,7 +611,8 @@ def test_glnologin_attr(topology_st, accpol_global):
-     account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled")
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_glnoalt_stattr(topology_st, accpol_global):
-     """Verify if user account can be inactivated based on lastLoginTime attribute, altstateattrname set to 1.1
- 
-@@ -656,6 +658,8 @@ def test_glnoalt_stattr(topology_st, accpol_global):
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_glattr_modtime(topology_st, accpol_global):
-     """Verify if user account can be inactivated based on modifyTimeStamp attribute
- 
-@@ -705,6 +709,8 @@ def test_glattr_modtime(topology_st, accpol_global):
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_glnoalt_nologin(topology_st, accpol_global):
-     """Verify if account policy plugin works if we set altstateattrname set to 1.1 and alwaysrecordlogin to NO
- 
-@@ -763,6 +769,8 @@ def test_glnoalt_nologin(topology_st, accpol_global):
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_glinact_nsact(topology_st, accpol_global):
-     """Verify if user account can be activated using ns-activate.pl script.
- 
-@@ -812,6 +820,8 @@ def test_glinact_nsact(topology_st, accpol_global):
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_glinact_acclock(topology_st, accpol_global):
-     """Verify if user account is activated when account is unlocked by passwordlockoutduration.
- 
-@@ -868,6 +878,8 @@ def test_glinact_acclock(topology_st, accpol_global):
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_glnact_pwexp(topology_st, accpol_global):
-     """Verify if user account is activated when password is reset after password is expired
- 
-@@ -951,6 +963,8 @@ def test_glnact_pwexp(topology_st, accpol_global):
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_locact_inact(topology_st, accpol_local):
-     """Verify if user account is inactivated when accountInactivityLimit is exceeded.
- 
-@@ -995,6 +1009,8 @@ def test_locact_inact(topology_st, accpol_local):
-     del_users(topology_st, suffix, subtree, userid, nousrs)
- 
- 
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_locinact_modrdn(topology_st, accpol_local):
-     """Verify if user account is inactivated when moved from ou=groups to ou=people subtree.
- 
-diff --git a/dirsrvtests/tests/suites/plugins/managed_entry_test.py b/dirsrvtests/tests/suites/plugins/managed_entry_test.py
-new file mode 100644
-index 000000000..662044ccd
---- /dev/null
-+++ b/dirsrvtests/tests/suites/plugins/managed_entry_test.py
-@@ -0,0 +1,351 @@
-+# --- BEGIN COPYRIGHT BLOCK ---
-+# Copyright (C) 2020 Red Hat, Inc.
-+# All rights reserved.
-+#
-+# License: GPL (version 3 or any later version).
-+# See LICENSE for details.
-+# --- END COPYRIGHT BLOCK ---
-+#
-+import pytest
-+import time
-+from lib389.topologies import topology_st as topo
-+from lib389.idm.user import UserAccount, UserAccounts
-+from lib389.idm.account import Account, Accounts
-+from lib389._constants import DEFAULT_SUFFIX
-+from lib389.idm.group import Groups
-+from lib389.config import Config
-+from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit
-+from lib389.plugins import MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate
-+from lib389.idm.nscontainer import nsContainers
-+from lib389.idm.domain import Domain
-+from lib389.tasks import Entry
-+import ldap
-+
-+pytestmark = pytest.mark.tier1
-+USER_PASSWORD = 'password'
-+
-+
-+@pytest.fixture(scope="module")
-+def _create_inital(topo):
-+    """
-+    Will create entries for this module
-+    """
-+    meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX)
-+    mep_template1 = meps.create(
-+        properties={'cn': 'UPG Template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup',
-+                    'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split(
-+                        '|')})
-+    conf_mep = MEPConfigs(topo.standalone)
-+    conf_mep.create(properties={'cn': 'UPG Definition1', 'originScope': f'cn=Users,{DEFAULT_SUFFIX}',
-+                                             'originFilter': 'objectclass=posixaccount',
-+                                             'managedBase': f'cn=Groups,{DEFAULT_SUFFIX}',
-+                                             'managedTemplate': mep_template1.dn})
-+    container = nsContainers(topo.standalone, DEFAULT_SUFFIX)
-+    for cn in ['Users', 'Groups']:
-+        container.create(properties={'cn': cn})
-+
-+
-+def test_binddn_tracking(topo, _create_inital):
-+    """Test Managed Entries basic functionality
-+
-+    :id: ea2ddfd4-aaec-11ea-8416-8c16451d917b
-+    :setup: Standalone Instance
-+    :steps:
-+        1. Set nsslapd-plugin-binddn-tracking attribute under cn=config
-+        2. Add user
-+        3. Managed Entry Plugin runs against managed entries upon any update without validating
-+        4. verify creation of User Private Group with its time stamp value
-+        5. Modify the SN attribute which is not mapped with managed entry
-+        6. run ModRDN operation and check the User Private group
-+        7. Check the time stamp of UPG should be changed now
-+        8. Check the creatorsname should be user dn and internalCreatorsname should be plugin name
-+        9. Check if a managed group entry was created
-+    :expected results:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+        5. Success
-+        6. Success
-+        7. Success
-+        8. Success
-+        9. Success
-+    """
-+    config = Config(topo.standalone)
-+    # set nsslapd-plugin-binddn-tracking attribute under cn=config
-+    config.replace('nsslapd-plugin-binddn-tracking', 'on')
-+    # Add user
-+    user = UserAccounts(topo.standalone, f'cn=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
-+    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}'
-+    entry = Account(topo.standalone, f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}')
-+    # Managed Entry Plugin runs against managed entries upon any update without validating
-+    # verify creation of User Private Group with its time stamp value
-+    stamp1 = entry.get_attr_val_utf8('modifyTimestamp')
-+    user.replace('sn', 'NewSN_modified')
-+    stamp2 = entry.get_attr_val_utf8('modifyTimestamp')
-+    # Modify the SN attribute which is not mapped with managed entry
-+    # Check the time stamp of UPG should not be changed
-+    assert stamp1 == stamp2
-+    time.sleep(1)
-+    # run ModRDN operation and check the User Private group
-+    user.rename(new_rdn='uid=UserNewRDN', newsuperior='cn=Users,dc=example,dc=com')
-+    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}'
-+    entry = Account(topo.standalone, f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}')
-+    stamp3 = entry.get_attr_val_utf8('modifyTimestamp')
-+    # Check the time stamp of UPG should be changed now
-+    assert stamp2 != stamp3
-+    time.sleep(1)
-+    user.replace('gidNumber', '1')
-+    stamp4 = entry.get_attr_val_utf8('modifyTimestamp')
-+    assert stamp4 != stamp3
-+    # Check the creatorsname should be user dn and internalCreatorsname should be plugin name
-+    assert entry.get_attr_val_utf8('creatorsname') == 'cn=directory manager'
-+    assert entry.get_attr_val_utf8('internalCreatorsname') == 'cn=Managed Entries,cn=plugins,cn=config'
-+    assert entry.get_attr_val_utf8('modifiersname') == 'cn=directory manager'
-+    user.delete()
-+    config.replace('nsslapd-plugin-binddn-tracking', 'off')
-+
-+
-+class WithObjectClass(Account):
-+    def __init__(self, instance, dn=None):
-+        super(WithObjectClass, self).__init__(instance, dn)
-+        self._rdn_attribute = 'uid'
-+        self._create_objectclasses = ['top', 'person', 'inetorgperson']
-+
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
-+def test_mentry01(topo, _create_inital):
-+    """Test Managed Entries basic functionality
-+
-+    :id: 9b87493b-0493-46f9-8364-6099d0e5d806
-+    :setup: Standalone Instance
-+    :steps:
-+        1. Check the plug-in status
-+        2. Add Template and definition entry
-+        3. Add our org units
-+        4. Add users with PosixAccount ObjectClass and verify creation of User Private Group
-+        5. Disable the plug-in and check the status
-+        6. Enable the plug-in and check the status the plug-in is disabled and creation of UPG should fail
-+        7. Add users with PosixAccount ObjectClass and verify creation of User Private Group
-+        8. Add users, run ModRDN operation and check the User Private group
-+        9. Add users, run LDAPMODIFY to change the gidNumber and check the User Private group
-+        10. Checking whether creation of User Private group fails for existing group entry
-+        11. Checking whether adding of posixAccount objectClass to existing user creates UPG
-+        12. Running ModRDN operation and checking the user private groups mepManagedBy attribute
-+        13. Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG
-+        14. Change the RDN of template entry, DSA Unwilling to perform error expected
-+        15. Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted
-+    :expected results:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+        5. Success
-+        6. Success
-+        7. Success
-+        8. Success
-+        9. Success
-+        10. Success
-+        11. Success
-+        12. Success
-+        13. Success
-+        14. Fail(Unwilling to perform )
-+        15. Success
-+    """
-+    # Check the plug-in status
-+    mana = ManagedEntriesPlugin(topo.standalone)
-+    assert mana.status()
-+    # Add Template and definition entry
-+    org1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'Users'})
-+    org2 = OrganizationalUnit(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}')
-+    meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX)
-+    mep_template1 = meps.create(properties={
-+        'cn': 'UPG Template1',
-+        'mepRDNAttr': 'cn',
-+        'mepStaticAttr': 'objectclass: posixGroup',
-+        'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')})
-+    conf_mep = MEPConfigs(topo.standalone)
-+    mep_config = conf_mep.create(properties={
-+        'cn': 'UPG Definition2',
-+        'originScope': org1.dn,
-+        'originFilter': 'objectclass=posixaccount',
-+        'managedBase': org2.dn,
-+        'managedTemplate': mep_template1.dn})
-+    # Add users with PosixAccount ObjectClass and verify creation of User Private Group
-+    user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
-+    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}'
-+    # Disable the plug-in and check the status
-+    mana.disable()
-+    user.delete()
-+    topo.standalone.restart()
-+    # Add users with PosixAccount ObjectClass when the plug-in is disabled and creation of UPG should fail
-+    user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
-+    assert not user.get_attr_val_utf8('mepManagedEntry')
-+    # Enable the plug-in and check the status
-+    mana.enable()
-+    user.delete()
-+    topo.standalone.restart()
-+    # Add users with PosixAccount ObjectClass and verify creation of User Private Group
-+    user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
-+    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}'
-+    # Add users, run ModRDN operation and check the User Private group
-+    # Add users, run LDAPMODIFY to change the gidNumber and check the User Private group
-+    user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com')
-+    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}'
-+    user.replace('gidNumber', '20209')
-+    entry = Account(topo.standalone, f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}')
-+    assert entry.get_attr_val_utf8('gidNumber') == '20209'
-+    user.replace_many(('sn', 'new_modified_sn'), ('gidNumber', '31309'))
-+    assert entry.get_attr_val_utf8('gidNumber') == '31309'
-+    user.delete()
-+    # Checking whether creation of User Private group fails for existing group entry
-+    grp = Groups(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None).create(properties={'cn': 'MENTRY_14'})
-+    user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
-+    with pytest.raises(ldap.NO_SUCH_OBJECT):
-+        entry.status()
-+    user.delete()
-+    # Checking whether adding of posixAccount objectClass to existing user creates UPG
-+    # Add Users without posixAccount objectClass
-+    users = WithObjectClass(topo.standalone, f'uid=test_test, ou=Users,{DEFAULT_SUFFIX}')
-+    user_properties1 = {'uid': 'test_test', 'cn': 'test', 'sn': 'test', 'mail': 'sasa@sasa.com', 'telephoneNumber': '123'}
-+    user = users.create(properties=user_properties1)
-+    assert not user.get_attr_val_utf8('mepManagedEntry')
-+    # Add posixAccount objectClass
-+    user.replace_many(('objectclass', ['top', 'person', 'inetorgperson', 'posixAccount']),
-+                      ('homeDirectory', '/home/ok'),
-+                      ('uidNumber', '61603'), ('gidNumber', '61603'))
-+    assert not user.get_attr_val_utf8('mepManagedEntry')
-+    user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
-+    entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com')
-+    # Add inetuser objectClass
-+    user.replace_many(
-+        ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson',
-+                         'organizationalPerson', 'nsMemberOf', 'nsAccount',
-+                         'person', 'mepOriginEntry', 'inetuser']),
-+        ('memberOf', entry.dn))
-+    assert entry.status()
-+    user.delete()
-+    user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
-+    entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com')
-+    # Add groupofNames objectClass
-+    user.replace_many(
-+        ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson',
-+                         'organizationalPerson', 'nsMemberOf', 'nsAccount',
-+                         'person', 'mepOriginEntry', 'groupofNames']),
-+        ('memberOf', user.dn))
-+    assert entry.status()
-+    # Running ModRDN operation and checking the user private groups mepManagedBy attribute
-+    user.replace('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}')
-+    user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com')
-+    assert user.get_attr_val_utf8('mepManagedEntry') == f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}'
-+    # Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG
-+    user.remove('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}')
-+    user.rename(new_rdn='uid=UserNewRDN1', newsuperior='ou=Users,dc=example,dc=com')
-+    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN1,ou=Groups,{DEFAULT_SUFFIX}'
-+    # Change the RDN of template entry, DSA Unwilling to perform error expected
-+    mep = MEPTemplate(topo.standalone, f'cn=UPG Template,{DEFAULT_SUFFIX}')
-+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
-+        mep.rename(new_rdn='cn=UPG Template2', newsuperior='dc=example,dc=com')
-+    # Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted
-+    before = user.get_attr_val_utf8('mepManagedEntry')
-+    user.rename(new_rdn='uid=Anuj', newsuperior='ou=Users,dc=example,dc=com')
-+    assert user.get_attr_val_utf8('mepManagedEntry') != before
-+
-+
-+def test_managed_entry_removal(topo):
-+    """Check that we can't remove managed entry manually
-+
-+    :id: cf9c5be5-97ef-46fc-b199-8346acf4c296
-+    :setup: Standalone Instance
-+    :steps:
-+        1. Enable the plugin
-+        2. Restart the instance
-+        3. Add our org units
-+        4. Set up config entry and template entry for the org units
-+        5. Add an entry that meets the MEP scope
-+        6. Check if a managed group entry was created
-+        7. Try to remove the entry while bound as Admin (non-DM)
-+        8. Remove the entry while bound as DM
-+        9. Check that the managing entry can be deleted too
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+        5. Success
-+        6. Success
-+        7. Should fail
-+        8. Success
-+        9. Success
-+    """
-+
-+    inst = topo.standalone
-+
-+    # Add ACI so we can test that non-DM user can't delete managed entry
-+    domain = Domain(inst, DEFAULT_SUFFIX)
-+    ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")"
-+    ACI_TARGETATTR = "(targetattr = *)"
-+    ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) "
-+    ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
-+    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
-+    domain.add('aci', ACI_BODY)
-+
-+    # stop the plugin, and start it
-+    plugin = ManagedEntriesPlugin(inst)
-+    plugin.disable()
-+    plugin.enable()
-+
-+    # Add our org units
-+    ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
-+    ou_people = ous.create(properties={'ou': 'managed_people'})
-+    ou_groups = ous.create(properties={'ou': 'managed_groups'})
-+
-+    mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX)
-+    mep_template1 = mep_templates.create(properties={
-+        'cn': 'MEP template',
-+        'mepRDNAttr': 'cn',
-+        'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'),
-+        'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
-+    })
-+    mep_configs = MEPConfigs(inst)
-+    mep_configs.create(properties={'cn': 'config',
-+                                   'originScope': ou_people.dn,
-+                                   'originFilter': 'objectclass=posixAccount',
-+                                   'managedBase': ou_groups.dn,
-+                                   'managedTemplate': mep_template1.dn})
-+    inst.restart()
-+
-+    # Add an entry that meets the MEP scope
-+    test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))
-+    managing_entry = test_users_m1.create_test_user(1001)
-+    managing_entry.reset_password(USER_PASSWORD)
-+    user_bound_conn = managing_entry.bind(USER_PASSWORD)
-+
-+    # Get the managed entry
-+    managed_groups = Groups(inst, ou_groups.dn, rdn=None)
-+    managed_entry = managed_groups.get(managing_entry.rdn)
-+
-+    # Check that the managed entry was created
-+    assert managed_entry.exists()
-+
-+    # Try to remove the entry while bound as Admin (non-DM)
-+    managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None)
-+    managed_entry_user_conn = managed_groups_user_conn.get(managed_entry.rdn)
-+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
-+        managed_entry_user_conn.delete()
-+    assert managed_entry_user_conn.exists()
-+
-+    # Remove the entry while bound as DM
-+    managed_entry.delete()
-+    assert not managed_entry.exists()
-+
-+    # Check that the managing entry can be deleted too
-+    managing_entry.delete()
-+    assert not managing_entry.exists()
-+
-+
-+if __name__ == '__main__':
-+    # Run isolated
-+    # -s for DEBUG mode
-+    CURRENT_FILE = os.path.realpath(__file__)
-+    pytest.main("-s %s" % CURRENT_FILE)
-diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py
-index bc99eef7d..d3b32c856 100644
---- a/dirsrvtests/tests/suites/plugins/memberof_test.py
-+++ b/dirsrvtests/tests/suites/plugins/memberof_test.py
-@@ -2655,7 +2655,8 @@ def test_complex_group_scenario_9(topology_st):
-     verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
-                     memofuser1, memofuser2, memofuser3, memofuser4)
- 
--
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_memberof_auto_add_oc(topology_st):
-     """Test the auto add objectclass (OC) feature. The plugin should add a predefined
-     objectclass that will allow memberOf to be added to an entry.
-diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
-index 5610e3c19..f0cd99cfc 100644
---- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
-+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
-@@ -223,7 +223,7 @@ def test_clean(topology_m4, m4rid):
- 
-     log.info('test_clean PASSED, restoring supplier 4...')
- 
--
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_clean_restart(topology_m4, m4rid):
-     """Check that cleanallruv task works properly after a restart
- 
-@@ -295,6 +295,7 @@ def test_clean_restart(topology_m4, m4rid):
-     log.info('test_clean_restart PASSED, restoring supplier 4...')
- 
- 
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_clean_force(topology_m4, m4rid):
-     """Check that multiple tasks with a 'force' option work properly
- 
-@@ -353,6 +354,7 @@ def test_clean_force(topology_m4, m4rid):
-     log.info('test_clean_force PASSED, restoring supplier 4...')
- 
- 
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_abort(topology_m4, m4rid):
-     """Test the abort task basic functionality
- 
-@@ -408,6 +410,7 @@ def test_abort(topology_m4, m4rid):
-     log.info('test_abort PASSED, restoring supplier 4...')
- 
- 
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_abort_restart(topology_m4, m4rid):
-     """Test the abort task can handle a restart, and then resume
- 
-@@ -486,6 +489,7 @@ def test_abort_restart(topology_m4, m4rid):
-     log.info('test_abort_restart PASSED, restoring supplier 4...')
- 
- 
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_abort_certify(topology_m4, m4rid):
-     """Test the abort task with a replica-certify-all option
- 
-@@ -555,6 +559,7 @@ def test_abort_certify(topology_m4, m4rid):
-     log.info('test_abort_certify PASSED, restoring supplier 4...')
- 
- 
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_stress_clean(topology_m4, m4rid):
-     """Put each server(m1 - m4) under a stress, and perform the entire clean process
- 
-@@ -641,6 +646,7 @@ def test_stress_clean(topology_m4, m4rid):
-     ldbm_config.set('nsslapd-readonly', 'off')
- 
- 
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
- def test_multiple_tasks_with_force(topology_m4, m4rid):
-     """Check that multiple tasks with a 'force' option work properly
- 
-diff --git a/dirsrvtests/tests/suites/replication/encryption_cl5_test.py b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py
-index 7ae7e1b13..b69863f53 100644
---- a/dirsrvtests/tests/suites/replication/encryption_cl5_test.py
-+++ b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py
-@@ -73,10 +73,10 @@ def _check_unhashed_userpw_encrypted(inst, change_type, user_dn, user_pw, is_enc
-                 assert user_pw_attr in entry, 'Changelog entry does not contain clear text password'
-     assert count, 'Operation type and DN of the entry not matched in changelog'
- 
--
--@pytest.mark.parametrize("encryption", ["AES", "3DES"])
--def test_algorithm_unhashed(topology_with_tls, encryption):
--    """Check encryption algowithm AES and 3DES.
-+#unstable or unstatus tests, skipped for now
-+@pytest.mark.flaky(max_runs=2, min_passes=1)
-+def test_algorithm_unhashed(topology_with_tls):
-+    """Check encryption algorithm AES
-     And check unhashed#user#password attribute for encryption.
- 
-     :id: b7a37bf8-4b2e-4dbd-9891-70117d67558c
-diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
-deleted file mode 100644
-index 112c73cb9..000000000
---- a/dirsrvtests/tests/suites/retrocl/basic_test.py
-+++ /dev/null
-@@ -1,292 +0,0 @@
--# --- BEGIN COPYRIGHT BLOCK ---
--# Copyright (C) 2021 Red Hat, Inc.
--# All rights reserved.
--#
--# License: GPL (version 3 or any later version).
--# See LICENSE for details.
--# --- END COPYRIGHT BLOCK ---
--
--import logging
--import ldap
--import time
--import pytest
--from lib389.topologies import topology_st
--from lib389.plugins import RetroChangelogPlugin
--from lib389._constants import *
--from lib389.utils import *
--from lib389.tasks import *
--from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
--from lib389.cli_base.dsrc import dsrc_arg_concat
--from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
--from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
--
--pytestmark = pytest.mark.tier1
--
--USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX
--USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX
--USER_PW = 'password'
--ATTR_HOMEPHONE = 'homePhone'
--ATTR_CARLICENSE = 'carLicense'
--
--log = logging.getLogger(__name__)
--
--def test_retrocl_exclude_attr_add(topology_st):
--    """ Test exclude attribute feature of the retrocl plugin for add operation
--
--    :id: 3481650f-2070-45ef-9600-2500cfc51559
--
--    :setup: Standalone instance
--
--    :steps:
--        1. Enable dynamic plugins
--        2. Confige retro changelog plugin
--        3. Add an entry
--        4. Ensure entry attrs are in the changelog
--        5. Exclude an attr
--        6. Add another entry
--        7. Ensure excluded attr is not in the changelog
--
--    :expectedresults:
--        1. Success
--        2. Success
--        3. Success
--        4. Success
--        5. Success
--        6. Success
--        7. Success
--    """
--
--    st = topology_st.standalone
--
--    log.info('Enable dynamic plugins')
--    try:
--        st.config.set('nsslapd-dynamic-plugins', 'on')
--    except ldap.LDAPError as e:
--        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
--        assert False
--
--    log.info('Configure retrocl plugin')
--    rcl = RetroChangelogPlugin(st)
--    rcl.disable()
--    rcl.enable()
--    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
--
--    log.info('Restarting instance')
--    try:
--        st.restart()
--    except ldap.LDAPError as e:
--        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
--        assert False
--
--    users = UserAccounts(st, DEFAULT_SUFFIX)
--
--    log.info('Adding user1')
--    try:
--        user1 = users.create(properties={
--            'sn': '1',
--            'cn': 'user 1',
--            'uid': 'user1',
--            'uidNumber': '11',
--            'gidNumber': '111',
--            'givenname': 'user1',
--            'homePhone': '0861234567',
--            'carLicense': '131D16674',
--            'mail': 'user1@whereever.com',
--            'homeDirectory': '/home/user1',
--            'userpassword': USER_PW})
--    except ldap.ALREADY_EXISTS:
--        pass
--    except ldap.LDAPError as e:
--        log.error("Failed to add user1")
--
--    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
--    try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
--    except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
--        assert False
--    assert len(cllist) > 0
--    if  cllist[0].hasAttr('changes'):
--        clstr = (cllist[0].getValue('changes')).decode()
--        assert ATTR_HOMEPHONE in clstr
--        assert ATTR_CARLICENSE in clstr
--
--    log.info('Excluding attribute ' + ATTR_HOMEPHONE)
--    args = FakeArgs()
--    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
--    args.instance = 'standalone1'
--    args.basedn = None
--    args.binddn = None
--    args.starttls = False
--    args.pwdfile = None
--    args.bindpw = None
--    args.prompt = False
--    args.exclude_attrs = ATTR_HOMEPHONE
--    args.func = retrochangelog_add
--    dsrc_inst = dsrc_arg_concat(args, None)
--    inst = connect_instance(dsrc_inst, False, args)
--    result = args.func(inst, None, log, args)
--    disconnect_instance(inst)
--    assert result is None
--
--    log.info("5s delay for retrocl plugin to restart")
--    time.sleep(5)
--
--    log.info('Adding user2')
--    try:
--        user2 = users.create(properties={
--            'sn': '2',
--            'cn': 'user 2',
--            'uid': 'user2',
--            'uidNumber': '22',
--            'gidNumber': '222',
--            'givenname': 'user2',
--            'homePhone': '0879088363',
--            'carLicense': '04WX11038',
--            'mail': 'user2@whereever.com',
--            'homeDirectory': '/home/user2',
--            'userpassword': USER_PW})
--    except ldap.ALREADY_EXISTS:
--        pass
--    except ldap.LDAPError as e:
--        log.error("Failed to add user2")
--
--    log.info('Verify homePhone attr is not in the changelog changestring')
--    try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
--        assert len(cllist) > 0
--        if  cllist[0].hasAttr('changes'):
--            clstr = (cllist[0].getValue('changes')).decode()
--            assert ATTR_HOMEPHONE not in clstr
--            assert ATTR_CARLICENSE in clstr
--    except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
--        assert False
--
--def test_retrocl_exclude_attr_mod(topology_st):
--    """ Test exclude attribute feature of the retrocl plugin for mod operation
--
--    :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
--
--    :setup: Standalone instance
--
--    :steps:
--        1. Enable dynamic plugins
--        2. Confige retro changelog plugin
--        3. Add user1 entry
--        4. Ensure entry attrs are in the changelog
--        5. Exclude an attr
--        6. Modify user1 entry
--        7. Ensure excluded attr is not in the changelog
--
--    :expectedresults:
--        1. Success
--        2. Success
--        3. Success
--        4. Success
--        5. Success
--        6. Success
--        7. Success
--    """
--
--    st = topology_st.standalone
--
--    log.info('Enable dynamic plugins')
--    try:
--        st.config.set('nsslapd-dynamic-plugins', 'on')
--    except ldap.LDAPError as e:
--        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
--        assert False
--
--    log.info('Configure retrocl plugin')
--    rcl = RetroChangelogPlugin(st)
--    rcl.disable()
--    rcl.enable()
--    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
--
--    log.info('Restarting instance')
--    try:
--        st.restart()
--    except ldap.LDAPError as e:
--        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
--        assert False
--
--    users = UserAccounts(st, DEFAULT_SUFFIX)
--
--    log.info('Adding user1')
--    try:
--        user1 = users.create(properties={
--            'sn': '1',
--            'cn': 'user 1',
--            'uid': 'user1',
--            'uidNumber': '11',
--            'gidNumber': '111',
--            'givenname': 'user1',
--            'homePhone': '0861234567',
--            'carLicense': '131D16674',
--            'mail': 'user1@whereever.com',
--            'homeDirectory': '/home/user1',
--            'userpassword': USER_PW})
--    except ldap.ALREADY_EXISTS:
--        pass
--    except ldap.LDAPError as e:
--        log.error("Failed to add user1")
--
--    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
--    try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
--    except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
--        assert False
--    assert len(cllist) > 0
--    if  cllist[0].hasAttr('changes'):
--        clstr = (cllist[0].getValue('changes')).decode()
--        assert ATTR_HOMEPHONE in clstr
--        assert ATTR_CARLICENSE in clstr
--
--    log.info('Excluding attribute ' + ATTR_CARLICENSE)
--    args = FakeArgs()
--    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
--    args.instance = 'standalone1'
--    args.basedn = None
--    args.binddn = None
--    args.starttls = False
--    args.pwdfile = None
--    args.bindpw = None
--    args.prompt = False
--    args.exclude_attrs = ATTR_CARLICENSE
--    args.func = retrochangelog_add
--    dsrc_inst = dsrc_arg_concat(args, None)
--    inst = connect_instance(dsrc_inst, False, args)
--    result = args.func(inst, None, log, args)
--    disconnect_instance(inst)
--    assert result is None
--
--    log.info("5s delay for retrocl plugin to restart")
--    time.sleep(5)
--
--    log.info('Modify user1 carLicense attribute')
--    try:
--        st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
--    except ldap.LDAPError as e:
--        log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
--        assert False
--
--    log.info('Verify carLicense attr is not in the changelog changestring')
--    try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
--        assert len(cllist) > 0
--        # There will be 2 entries in the changelog for this user, we are only
--        #interested in the second one, the modify operation.
--        if  cllist[1].hasAttr('changes'):
--            clstr = (cllist[1].getValue('changes')).decode()
--            assert ATTR_CARLICENSE not in clstr
--    except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
--        assert False
--
--if __name__ == '__main__':
--    # Run isolated
--    # -s for DEBUG mode
--    CURRENT_FILE = os.path.realpath(__file__)
--    pytest.main("-s %s" % CURRENT_FILE)
--- 
-2.26.3
-
diff --git a/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch b/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch
deleted file mode 100644
index 1b86463..0000000
--- a/SOURCES/0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch
+++ /dev/null
@@ -1,322 +0,0 @@
-From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001
-From: James Chapman <jachapma@redhat.com>
-Date: Tue, 27 Apr 2021 17:00:15 +0100
-Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro
- changelog (#4723)
-
-Description: When the retro changelog plugin is enabled it writes the
-             added/modified values to the "cn-changelog" suffix. In
-             some cases an entries attribute values can be of a
-             sensitive nature and should be excluded. This RFE adds
-             functionality that will allow an admin exclude certain
-             attributes from the retro changelog DB.
-
-Relates: https://github.com/389ds/389-ds-base/issues/4701
-
-Reviewed by: mreynolds389, droideck (Thanks folks)
----
- .../tests/suites/retrocl/basic_test.py        | 292 ++++++++++++++++++
- 1 file changed, 292 insertions(+)
- create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py
-
-diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
-new file mode 100644
-index 000000000..112c73cb9
---- /dev/null
-+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
-@@ -0,0 +1,292 @@
-+# --- BEGIN COPYRIGHT BLOCK ---
-+# Copyright (C) 2021 Red Hat, Inc.
-+# All rights reserved.
-+#
-+# License: GPL (version 3 or any later version).
-+# See LICENSE for details.
-+# --- END COPYRIGHT BLOCK ---
-+
-+import logging
-+import ldap
-+import time
-+import pytest
-+from lib389.topologies import topology_st
-+from lib389.plugins import RetroChangelogPlugin
-+from lib389._constants import *
-+from lib389.utils import *
-+from lib389.tasks import *
-+from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
-+from lib389.cli_base.dsrc import dsrc_arg_concat
-+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
-+from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
-+
-+pytestmark = pytest.mark.tier1
-+
-+USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX
-+USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX
-+USER_PW = 'password'
-+ATTR_HOMEPHONE = 'homePhone'
-+ATTR_CARLICENSE = 'carLicense'
-+
-+log = logging.getLogger(__name__)
-+
-+def test_retrocl_exclude_attr_add(topology_st):
-+    """ Test exclude attribute feature of the retrocl plugin for add operation
-+
-+    :id: 3481650f-2070-45ef-9600-2500cfc51559
-+
-+    :setup: Standalone instance
-+
-+    :steps:
-+        1. Enable dynamic plugins
-+        2. Confige retro changelog plugin
-+        3. Add an entry
-+        4. Ensure entry attrs are in the changelog
-+        5. Exclude an attr
-+        6. Add another entry
-+        7. Ensure excluded attr is not in the changelog
-+
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+        5. Success
-+        6. Success
-+        7. Success
-+    """
-+
-+    st = topology_st.standalone
-+
-+    log.info('Enable dynamic plugins')
-+    try:
-+        st.config.set('nsslapd-dynamic-plugins', 'on')
-+    except ldap.LDAPError as e:
-+        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
-+        assert False
-+
-+    log.info('Configure retrocl plugin')
-+    rcl = RetroChangelogPlugin(st)
-+    rcl.disable()
-+    rcl.enable()
-+    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
-+
-+    log.info('Restarting instance')
-+    try:
-+        st.restart()
-+    except ldap.LDAPError as e:
-+        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
-+        assert False
-+
-+    users = UserAccounts(st, DEFAULT_SUFFIX)
-+
-+    log.info('Adding user1')
-+    try:
-+        user1 = users.create(properties={
-+            'sn': '1',
-+            'cn': 'user 1',
-+            'uid': 'user1',
-+            'uidNumber': '11',
-+            'gidNumber': '111',
-+            'givenname': 'user1',
-+            'homePhone': '0861234567',
-+            'carLicense': '131D16674',
-+            'mail': 'user1@whereever.com',
-+            'homeDirectory': '/home/user1',
-+            'userpassword': USER_PW})
-+    except ldap.ALREADY_EXISTS:
-+        pass
-+    except ldap.LDAPError as e:
-+        log.error("Failed to add user1")
-+
-+    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
-+    try:
-+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
-+    except ldap.LDAPError as e:
-+        log.fatal("Changelog search failed, error: " +str(e))
-+        assert False
-+    assert len(cllist) > 0
-+    if  cllist[0].hasAttr('changes'):
-+        clstr = (cllist[0].getValue('changes')).decode()
-+        assert ATTR_HOMEPHONE in clstr
-+        assert ATTR_CARLICENSE in clstr
-+
-+    log.info('Excluding attribute ' + ATTR_HOMEPHONE)
-+    args = FakeArgs()
-+    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
-+    args.instance = 'standalone1'
-+    args.basedn = None
-+    args.binddn = None
-+    args.starttls = False
-+    args.pwdfile = None
-+    args.bindpw = None
-+    args.prompt = False
-+    args.exclude_attrs = ATTR_HOMEPHONE
-+    args.func = retrochangelog_add
-+    dsrc_inst = dsrc_arg_concat(args, None)
-+    inst = connect_instance(dsrc_inst, False, args)
-+    result = args.func(inst, None, log, args)
-+    disconnect_instance(inst)
-+    assert result is None
-+
-+    log.info("5s delay for retrocl plugin to restart")
-+    time.sleep(5)
-+
-+    log.info('Adding user2')
-+    try:
-+        user2 = users.create(properties={
-+            'sn': '2',
-+            'cn': 'user 2',
-+            'uid': 'user2',
-+            'uidNumber': '22',
-+            'gidNumber': '222',
-+            'givenname': 'user2',
-+            'homePhone': '0879088363',
-+            'carLicense': '04WX11038',
-+            'mail': 'user2@whereever.com',
-+            'homeDirectory': '/home/user2',
-+            'userpassword': USER_PW})
-+    except ldap.ALREADY_EXISTS:
-+        pass
-+    except ldap.LDAPError as e:
-+        log.error("Failed to add user2")
-+
-+    log.info('Verify homePhone attr is not in the changelog changestring')
-+    try:
-+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
-+        assert len(cllist) > 0
-+        if  cllist[0].hasAttr('changes'):
-+            clstr = (cllist[0].getValue('changes')).decode()
-+            assert ATTR_HOMEPHONE not in clstr
-+            assert ATTR_CARLICENSE in clstr
-+    except ldap.LDAPError as e:
-+        log.fatal("Changelog search failed, error: " +str(e))
-+        assert False
-+
-+def test_retrocl_exclude_attr_mod(topology_st):
-+    """ Test exclude attribute feature of the retrocl plugin for mod operation
-+
-+    :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
-+
-+    :setup: Standalone instance
-+
-+    :steps:
-+        1. Enable dynamic plugins
-+        2. Confige retro changelog plugin
-+        3. Add user1 entry
-+        4. Ensure entry attrs are in the changelog
-+        5. Exclude an attr
-+        6. Modify user1 entry
-+        7. Ensure excluded attr is not in the changelog
-+
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+        5. Success
-+        6. Success
-+        7. Success
-+    """
-+
-+    st = topology_st.standalone
-+
-+    log.info('Enable dynamic plugins')
-+    try:
-+        st.config.set('nsslapd-dynamic-plugins', 'on')
-+    except ldap.LDAPError as e:
-+        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
-+        assert False
-+
-+    log.info('Configure retrocl plugin')
-+    rcl = RetroChangelogPlugin(st)
-+    rcl.disable()
-+    rcl.enable()
-+    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
-+
-+    log.info('Restarting instance')
-+    try:
-+        st.restart()
-+    except ldap.LDAPError as e:
-+        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
-+        assert False
-+
-+    users = UserAccounts(st, DEFAULT_SUFFIX)
-+
-+    log.info('Adding user1')
-+    try:
-+        user1 = users.create(properties={
-+            'sn': '1',
-+            'cn': 'user 1',
-+            'uid': 'user1',
-+            'uidNumber': '11',
-+            'gidNumber': '111',
-+            'givenname': 'user1',
-+            'homePhone': '0861234567',
-+            'carLicense': '131D16674',
-+            'mail': 'user1@whereever.com',
-+            'homeDirectory': '/home/user1',
-+            'userpassword': USER_PW})
-+    except ldap.ALREADY_EXISTS:
-+        pass
-+    except ldap.LDAPError as e:
-+        log.error("Failed to add user1")
-+
-+    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
-+    try:
-+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
-+    except ldap.LDAPError as e:
-+        log.fatal("Changelog search failed, error: " +str(e))
-+        assert False
-+    assert len(cllist) > 0
-+    if  cllist[0].hasAttr('changes'):
-+        clstr = (cllist[0].getValue('changes')).decode()
-+        assert ATTR_HOMEPHONE in clstr
-+        assert ATTR_CARLICENSE in clstr
-+
-+    log.info('Excluding attribute ' + ATTR_CARLICENSE)
-+    args = FakeArgs()
-+    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
-+    args.instance = 'standalone1'
-+    args.basedn = None
-+    args.binddn = None
-+    args.starttls = False
-+    args.pwdfile = None
-+    args.bindpw = None
-+    args.prompt = False
-+    args.exclude_attrs = ATTR_CARLICENSE
-+    args.func = retrochangelog_add
-+    dsrc_inst = dsrc_arg_concat(args, None)
-+    inst = connect_instance(dsrc_inst, False, args)
-+    result = args.func(inst, None, log, args)
-+    disconnect_instance(inst)
-+    assert result is None
-+
-+    log.info("5s delay for retrocl plugin to restart")
-+    time.sleep(5)
-+
-+    log.info('Modify user1 carLicense attribute')
-+    try:
-+        st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
-+    except ldap.LDAPError as e:
-+        log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
-+        assert False
-+
-+    log.info('Verify carLicense attr is not in the changelog changestring')
-+    try:
-+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
-+        assert len(cllist) > 0
-+        # There will be 2 entries in the changelog for this user, we are only
-+        #interested in the second one, the modify operation.
-+        if  cllist[1].hasAttr('changes'):
-+            clstr = (cllist[1].getValue('changes')).decode()
-+            assert ATTR_CARLICENSE not in clstr
-+    except ldap.LDAPError as e:
-+        log.fatal("Changelog search failed, error: " +str(e))
-+        assert False
-+
-+if __name__ == '__main__':
-+    # Run isolated
-+    # -s for DEBUG mode
-+    CURRENT_FILE = os.path.realpath(__file__)
-+    pytest.main("-s %s" % CURRENT_FILE)
--- 
-2.26.3
-
diff --git a/SOURCES/0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch b/SOURCES/0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch
new file mode 100644
index 0000000..51f8170
--- /dev/null
+++ b/SOURCES/0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch
@@ -0,0 +1,621 @@
+From 968ad6b5039d839bfbc61da755c252cc7598415b Mon Sep 17 00:00:00 2001
+From: progier389 <progier@redhat.com>
+Date: Mon, 25 Oct 2021 17:09:57 +0200
+Subject: [PATCH 02/12] Issue 4943 - Fix csn generator to limit time skew drift
+ - PR 4946
+
+---
+ ldap/servers/slapd/csngen.c       | 433 +++++++++++++++++-------------
+ ldap/servers/slapd/slapi-plugin.h |   9 +
+ 2 files changed, 255 insertions(+), 187 deletions(-)
+
+diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
+index fcd88b4cc..c7c5c2ba8 100644
+--- a/ldap/servers/slapd/csngen.c
++++ b/ldap/servers/slapd/csngen.c
+@@ -18,8 +18,9 @@
+ #include "prcountr.h"
+ #include "slap.h"
+ 
++
+ #define CSN_MAX_SEQNUM 0xffff              /* largest sequence number */
+-#define CSN_MAX_TIME_ADJUST 24 * 60 * 60   /* maximum allowed time adjustment (in seconds) = 1 day */
++#define CSN_MAX_TIME_ADJUST _SEC_PER_DAY   /* maximum allowed time adjustment (in seconds) = 1 day */
+ #define ATTR_CSN_GENERATOR_STATE "nsState" /* attribute that stores csn state information */
+ #define STATE_FORMAT "%8x%8x%8x%4hx%4hx"
+ #define STATE_LENGTH 32
+@@ -27,6 +28,8 @@
+ #define CSN_CALC_TSTAMP(gen) ((gen)->state.sampled_time + \
+                               (gen)->state.local_offset + \
+                               (gen)->state.remote_offset)
++#define TIME_DIFF_WARNING_DELAY  (30*_SEC_PER_DAY)  /* log an info message when difference
++                                                       between clock is greater than this delay */
+ 
+ /*
+  * **************************************************************************
+@@ -63,6 +66,7 @@ typedef struct csngen_state
+ struct csngen
+ {
+     csngen_state state;      /* persistent state of the generator */
++    int32_t (*gettime)(struct timespec *tp); /* Get local time */
+     callback_list callbacks; /* list of callbacks registered with the generator */
+     Slapi_RWLock *lock;      /* concurrency control */
+ };
+@@ -78,7 +82,7 @@ static int _csngen_init_callbacks(CSNGen *gen);
+ static void _csngen_call_callbacks(const CSNGen *gen, const CSN *csn, PRBool abort);
+ static int _csngen_cmp_callbacks(const void *el1, const void *el2);
+ static void _csngen_free_callbacks(CSNGen *gen);
+-static int _csngen_adjust_local_time(CSNGen *gen, time_t cur_time);
++static int _csngen_adjust_local_time(CSNGen *gen);
+ 
+ /*
+  * **************************************************************************
+@@ -121,6 +125,7 @@ csngen_new(ReplicaId rid, Slapi_Attr *state)
+     _csngen_init_callbacks(gen);
+ 
+     gen->state.rid = rid;
++    gen->gettime = slapi_clock_utc_gettime;
+ 
+     if (state) {
+         rc = _csngen_parse_state(gen, state);
+@@ -164,10 +169,7 @@ csngen_free(CSNGen **gen)
+ int
+ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
+ {
+-    struct timespec now = {0};
+     int rc = CSN_SUCCESS;
+-    time_t cur_time;
+-    int delta;
+ 
+     if (gen == NULL || csn == NULL) {
+         slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", "Invalid argument\n");
+@@ -180,39 +182,13 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
+         return CSN_MEMORY_ERROR;
+     }
+ 
+-    if ((rc = slapi_clock_gettime(&now)) != 0) {
+-        /* Failed to get system time, we must abort */
+-        slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
+-                "Failed to get system time (%s)\n",
+-                slapd_system_strerror(rc));
+-        return CSN_TIME_ERROR;
+-    }
+-    cur_time = now.tv_sec;
+-
+     slapi_rwlock_wrlock(gen->lock);
+ 
+-    /* check if the time should be adjusted */
+-    delta = cur_time - gen->state.sampled_time;
+-    if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
+-        /* We had a jump larger than a day */
+-        slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
+-                "Detected large jump in CSN time.  Delta: %d (current time: %ld  vs  previous time: %ld)\n",
+-                delta, cur_time, gen->state.sampled_time);
+-    }
+-    if (delta > 0) {
+-        rc = _csngen_adjust_local_time(gen, cur_time);
+-        if (rc != CSN_SUCCESS) {
+-            slapi_rwlock_unlock(gen->lock);
+-            return rc;
+-        }
++    rc = _csngen_adjust_local_time(gen);
++    if (rc != CSN_SUCCESS) {
++        slapi_rwlock_unlock(gen->lock);
++        return rc;
+     }
+-    /* if (delta < 0) this means the local system time was set back
+-     * the new csn will be generated based on sampled time, which is
+-     * ahead of system time and previously generated csns.
+-     * the time stamp of the csn will not change until system time
+-     * catches up or is corrected by remote csns.
+-     * But we need to ensure that the seq_num does not overflow.
+-     */
+ 
+     if (gen->state.seq_num == CSN_MAX_SEQNUM) {
+         slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", "Sequence rollover; "
+@@ -261,13 +237,36 @@ csngen_rewrite_rid(CSNGen *gen, ReplicaId rid)
+ }
+ 
+ /* this function should be called when a remote CSN for the same part of
+-   the dit becomes known to the server (for instance, as part of RUV during
+-   replication session. In response, the generator would adjust its notion
+-   of time so that it does not generate smaller csns */
++ * the dit becomes known to the server (for instance, as part of RUV during
++ * replication session. In response, the generator would adjust its notion
++ * of time so that it does not generate smaller csns
++ *
++ * The following counters are updated
++ *   - when a new csn is generated
++ *   - when csngen is adjusted (beginning of a incoming (extop) or outgoing
++ *     (inc_protocol) session)
++ *
++ * sampled_time: It takes the value of current system time.
++ *
++ * remote offset: it is updated when 'csn' argument is ahead of the next csn
++ * that the csn generator will generate. It is the MAX jump ahead, it is not
++ * cumulative counter (e.g. if remote_offset=7 and 'csn' is 5sec ahead
++ * remote_offset stays the same. The jump ahead (5s) pour into the local offset.
++ * It is not clear of the interest of this counter. It gives an indication of
++ * the maximum jump ahead but not much.
++ *
++ * local offset: it is increased if
++ *   - system time is going backward (compare sampled_time)
++ *   - if 'csn' argument is ahead of csn that the csn generator would generate
++ *     AND diff('csn', csngen.new_csn) < remote_offset
++ *     then the diff "pour" into local_offset
++ *  It is decreased as the clock is ticking, local offset is "consumed" as
++ *  sampled_time progresses.
++ */
+ int
+ csngen_adjust_time(CSNGen *gen, const CSN *csn)
+ {
+-    time_t remote_time, remote_offset, cur_time;
++    time_t remote_time, remote_offset, cur_time, old_time, new_time;
+     PRUint16 remote_seqnum;
+     int rc;
+     extern int config_get_ignore_time_skew(void);
+@@ -281,6 +280,11 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
+ 
+     slapi_rwlock_wrlock(gen->lock);
+ 
++    /* Get last local csn time */
++    old_time = CSN_CALC_TSTAMP(gen);
++    /* update local offset and sample_time */
++    rc = _csngen_adjust_local_time(gen);
++
+     if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+         cur_time = CSN_CALC_TSTAMP(gen);
+         slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
+@@ -290,79 +294,60 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
+                       gen->state.local_offset,
+                       gen->state.remote_offset);
+     }
+-    /* make sure we have the current time */
+-    cur_time = slapi_current_utc_time();
+-
+-    /* make sure sampled_time is current */
+-    /* must only call adjust_local_time if the current time is greater than
+-       the generator state time */
+-    if ((cur_time > gen->state.sampled_time) &&
+-        (CSN_SUCCESS != (rc = _csngen_adjust_local_time(gen, cur_time)))) {
++    if (rc != CSN_SUCCESS) {
+         /* _csngen_adjust_local_time will log error */
+         slapi_rwlock_unlock(gen->lock);
+-        csngen_dump_state(gen);
++        csngen_dump_state(gen, SLAPI_LOG_DEBUG);
+         return rc;
+     }
+ 
+-    cur_time = CSN_CALC_TSTAMP(gen);
+-    if (remote_time >= cur_time) {
+-        time_t new_time = 0;
+-
+-        if (remote_seqnum > gen->state.seq_num) {
+-            if (remote_seqnum < CSN_MAX_SEQNUM) {
+-                gen->state.seq_num = remote_seqnum + 1;
+-            } else {
+-                remote_time++;
+-            }
+-        }
+-
+-        remote_offset = remote_time - cur_time;
+-        if (remote_offset > gen->state.remote_offset) {
+-            if (ignore_time_skew || (remote_offset <= CSN_MAX_TIME_ADJUST)) {
+-                gen->state.remote_offset = remote_offset;
+-            } else /* remote_offset > CSN_MAX_TIME_ADJUST */
+-            {
+-                slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
+-                              "Adjustment limit exceeded; value - %ld, limit - %ld\n",
+-                              remote_offset, (long)CSN_MAX_TIME_ADJUST);
+-                slapi_rwlock_unlock(gen->lock);
+-                csngen_dump_state(gen);
+-                return CSN_LIMIT_EXCEEDED;
+-            }
+-        } else if (remote_offset > 0) { /* still need to account for this */
+-            gen->state.local_offset += remote_offset;
++    remote_offset = remote_time - CSN_CALC_TSTAMP(gen);
++    if (remote_offset > 0) {
++        if (!ignore_time_skew && (gen->state.remote_offset + remote_offset > CSN_MAX_TIME_ADJUST)) {
++            slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
++                          "Adjustment limit exceeded; value - %ld, limit - %ld\n",
++                          remote_offset, (long)CSN_MAX_TIME_ADJUST);
++            slapi_rwlock_unlock(gen->lock);
++            csngen_dump_state(gen, SLAPI_LOG_DEBUG);
++            return CSN_LIMIT_EXCEEDED;
+         }
+-
+-        new_time = CSN_CALC_TSTAMP(gen);
+-        /* let's revisit the seq num - if the new time is > the old
+-           tiem, we should reset the seq number to remote + 1 if
+-           this won't cause a wrap around */
+-        if (new_time >= cur_time) {
+-            /* just set seq_num regardless of whether the current one
+-               is < or > than the remote one - the goal of this function
+-               is to make sure we generate CSNs > the remote CSN - if
+-               we have increased the time, we can decrease the seqnum
+-               and still guarantee that any new CSNs generated will be
+-               > any current CSNs we have generated */
+-            if (remote_seqnum < gen->state.seq_num) {
+-                gen->state.seq_num ++;
+-            } else {
+-                gen->state.seq_num = remote_seqnum + 1;
+-            }
++        gen->state.remote_offset += remote_offset;
++        /* To avoid beat phenomena between suppliers let put 1 second in local_offset
++         * it will be eaten at next clock tick rather than increasing remote offset
++         * If we do not do that we will have a time skew drift of 1 second per 2 seconds
++         * if suppliers are desynchronized by 0.5 second 
++         */
++        if (gen->state.local_offset == 0) {
++            gen->state.local_offset++;
++            gen->state.remote_offset--;
+         }
+-        if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+-            slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
+-                          "gen state after %08lx%04x:%ld:%ld:%ld\n",
+-                          new_time, gen->state.seq_num,
+-                          gen->state.sampled_time,
+-                          gen->state.local_offset,
+-                          gen->state.remote_offset);
++    }
++    /* Time to compute seqnum so that 
++     *   new csn >= remote csn and new csn >= old local csn 
++     */
++    new_time = CSN_CALC_TSTAMP(gen);
++    PR_ASSERT(new_time >= old_time);
++    PR_ASSERT(new_time >= remote_time);
++    if (new_time > old_time) {
++        /* Can reset (local) seqnum */
++        gen->state.seq_num = 0;
++    }
++    if (new_time == remote_time && remote_seqnum >= gen->state.seq_num) {
++        if (remote_seqnum >= CSN_MAX_SEQNUM) {
++            gen->state.seq_num = 0;
++            gen->state.local_offset++;
++        } else {
++            gen->state.seq_num = remote_seqnum + 1;
+         }
+-    } else if (gen->state.remote_offset > 0) {
+-        /* decrease remote offset? */
+-        /* how to decrease remote offset but ensure that we don't
+-           generate a duplicate CSN, or a CSN smaller than one we've already
+-           generated? */
++    }
++
++    if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
++        slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
++                      "gen state after %08lx%04x:%ld:%ld:%ld\n",
++                      new_time, gen->state.seq_num,
++                      gen->state.sampled_time,
++                      gen->state.local_offset,
++                      gen->state.remote_offset);
+     }
+ 
+     slapi_rwlock_unlock(gen->lock);
+@@ -435,16 +420,16 @@ csngen_unregister_callbacks(CSNGen *gen, void *cookie)
+ 
+ /* debugging function */
+ void
+-csngen_dump_state(const CSNGen *gen)
++csngen_dump_state(const CSNGen *gen, int severity)
+ {
+     if (gen) {
+         slapi_rwlock_rdlock(gen->lock);
+-        slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "CSN generator's state:\n");
+-        slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
+-        slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
+-        slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
+-        slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
+-        slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
++        slapi_log_err(severity, "csngen_dump_state", "CSN generator's state:\n");
++        slapi_log_err(severity, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
++        slapi_log_err(severity, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
++        slapi_log_err(severity, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
++        slapi_log_err(severity, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
++        slapi_log_err(severity, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
+         slapi_rwlock_unlock(gen->lock);
+     }
+ }
+@@ -459,7 +444,7 @@ csngen_test()
+     CSNGen *gen = csngen_new(255, NULL);
+ 
+     slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "staring csn generator test ...");
+-    csngen_dump_state(gen);
++    csngen_dump_state(gen, SLAPI_LOG_INFO);
+ 
+     rc = _csngen_start_test_threads(gen);
+     if (rc == 0) {
+@@ -469,7 +454,7 @@ csngen_test()
+     }
+ 
+     _csngen_stop_test_threads();
+-    csngen_dump_state(gen);
++    csngen_dump_state(gen, SLAPI_LOG_INFO);
+     slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "csn generator test is complete...");
+ }
+ 
+@@ -574,94 +559,93 @@ _csngen_cmp_callbacks(const void *el1, const void *el2)
+         return 1;
+ }
+ 
++/* Get time and adjust local offset */
+ static int
+-_csngen_adjust_local_time(CSNGen *gen, time_t cur_time)
++_csngen_adjust_local_time(CSNGen *gen)
+ {
+     extern int config_get_ignore_time_skew(void);
+     int ignore_time_skew = config_get_ignore_time_skew();
+-    time_t time_diff = cur_time - gen->state.sampled_time;
++    struct timespec now = {0};
++    time_t time_diff;
++    time_t cur_time;
++    int rc;
+ 
++    
++    if ((rc = gen->gettime(&now)) != 0) {
++        /* Failed to get system time, we must abort */
++        slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
++                "Failed to get system time (%s)\n",
++                slapd_system_strerror(rc));
++        return CSN_TIME_ERROR;
++    }
++    cur_time = now.tv_sec;
++    time_diff = cur_time - gen->state.sampled_time;
++
++    /* check if the time should be adjusted */
+     if (time_diff == 0) {
+         /* This is a no op - _csngen_adjust_local_time should never be called
+            in this case, because there is nothing to adjust - but just return
+            here to protect ourselves
+         */
+         return CSN_SUCCESS;
+-    } else if (time_diff > 0) {
+-        time_t ts_before = CSN_CALC_TSTAMP(gen);
+-        time_t ts_after = 0;
+-        if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+-            time_t new_time = CSN_CALC_TSTAMP(gen);
+-            slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
+-                          "gen state before %08lx%04x:%ld:%ld:%ld\n",
+-                          new_time, gen->state.seq_num,
+-                          gen->state.sampled_time,
+-                          gen->state.local_offset,
+-                          gen->state.remote_offset);
+-        }
+-
+-        gen->state.sampled_time = cur_time;
+-        if (time_diff > gen->state.local_offset)
+-            gen->state.local_offset = 0;
+-        else
+-            gen->state.local_offset = gen->state.local_offset - time_diff;
+-
+-        /* only reset the seq_num if the new timestamp part of the CSN
+-           is going to be greater than the old one - if they are the
+-           same after the above adjustment (which can happen if
+-           csngen_adjust_time has to store the offset in the
+-           local_offset field) we must not allow the CSN to regress or
+-           generate duplicate numbers */
+-        ts_after = CSN_CALC_TSTAMP(gen);
+-        if (ts_after > ts_before) {
+-            gen->state.seq_num = 0; /* only reset if new time > old time */
+-        }
+-
+-        if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+-            time_t new_time = CSN_CALC_TSTAMP(gen);
+-            slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
+-                          "gen state after %08lx%04x:%ld:%ld:%ld\n",
+-                          new_time, gen->state.seq_num,
+-                          gen->state.sampled_time,
+-                          gen->state.local_offset,
+-                          gen->state.remote_offset);
+-        }
+-        return CSN_SUCCESS;
+-    } else /* time was turned back */
+-    {
+-        if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+-            time_t new_time = CSN_CALC_TSTAMP(gen);
+-            slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
+-                          "gen state back before %08lx%04x:%ld:%ld:%ld\n",
+-                          new_time, gen->state.seq_num,
+-                          gen->state.sampled_time,
+-                          gen->state.local_offset,
+-                          gen->state.remote_offset);
+-        }
++    }
++    if (labs(time_diff) > TIME_DIFF_WARNING_DELAY) {
++        /* We had a jump larger than a day */
++        slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
++                "Detected large jump in CSN time.  Delta: %ld (current time: %ld  vs  previous time: %ld)\n",
++                time_diff, cur_time, gen->state.sampled_time);
++    }
++    if (!ignore_time_skew && (gen->state.local_offset - time_diff > CSN_MAX_TIME_ADJUST)) {
++        slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
++                      "Adjustment limit exceeded; value - %ld, limit - %d\n",
++                      gen->state.local_offset - time_diff, CSN_MAX_TIME_ADJUST);
++        return CSN_LIMIT_EXCEEDED;
++    }
+ 
+-        if (!ignore_time_skew && (labs(time_diff) > CSN_MAX_TIME_ADJUST)) {
+-            slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
+-                          "Adjustment limit exceeded; value - %ld, limit - %d\n",
+-                          labs(time_diff), CSN_MAX_TIME_ADJUST);
+-            return CSN_LIMIT_EXCEEDED;
+-        }
++    time_t ts_before = CSN_CALC_TSTAMP(gen);
++    time_t ts_after = 0;
++    if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
++        time_t new_time = CSN_CALC_TSTAMP(gen);
++        slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
++                      "gen state before %08lx%04x:%ld:%ld:%ld\n",
++                      new_time, gen->state.seq_num,
++                      gen->state.sampled_time,
++                      gen->state.local_offset,
++                      gen->state.remote_offset);
++    }
+ 
+-        gen->state.sampled_time = cur_time;
+-        gen->state.local_offset = MAX_VAL(gen->state.local_offset, labs(time_diff));
+-        gen->state.seq_num = 0;
++    gen->state.sampled_time = cur_time;
++    gen->state.local_offset = MAX_VAL(0, gen->state.local_offset - time_diff);
++    /* new local_offset = MAX_VAL(0, old sample_time + old local_offset - cur_time)
++     * ==> new local_offset >= 0 and 
++     *     new local_offset + cur_time >= old sample_time + old local_offset
++     * ==> new local_offset + cur_time + remote_offset >=
++     *            sample_time + old local_offset + remote_offset
++     * ==> CSN_CALC_TSTAMP(new gen) >= CSN_CALC_TSTAMP(old gen)
++     */
+ 
+-        if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+-            time_t new_time = CSN_CALC_TSTAMP(gen);
+-            slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
+-                          "gen state back after %08lx%04x:%ld:%ld:%ld\n",
+-                          new_time, gen->state.seq_num,
+-                          gen->state.sampled_time,
+-                          gen->state.local_offset,
+-                          gen->state.remote_offset);
+-        }
++    /* only reset the seq_num if the new timestamp part of the CSN
++       is going to be greater than the old one - if they are the
++       same after the above adjustment (which can happen if
++       csngen_adjust_time has to store the offset in the
++       local_offset field) we must not allow the CSN to regress or
++       generate duplicate numbers */
++    ts_after = CSN_CALC_TSTAMP(gen);
++    PR_ASSERT(ts_after >= ts_before);
++    if (ts_after > ts_before) {
++        gen->state.seq_num = 0; /* only reset if new time > old time */
++    }
+ 
+-        return CSN_SUCCESS;
++    if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
++        time_t new_time = CSN_CALC_TSTAMP(gen);
++        slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
++                      "gen state after %08lx%04x:%ld:%ld:%ld\n",
++                      new_time, gen->state.seq_num,
++                      gen->state.sampled_time,
++                      gen->state.local_offset,
++                      gen->state.remote_offset);
+     }
++    return CSN_SUCCESS;
+ }
+ 
+ /*
+@@ -799,7 +783,7 @@ _csngen_remote_tester_main(void *data)
+                               "Failed to adjust generator's time; csn error - %d\n", rc);
+             }
+ 
+-            csngen_dump_state(gen);
++            csngen_dump_state(gen, SLAPI_LOG_INFO);
+         }
+         csn_free(&csn);
+ 
+@@ -825,8 +809,83 @@ _csngen_local_tester_main(void *data)
+         /*
+          * g_sampled_time -= slapi_rand () % 100;
+          */
+-        csngen_dump_state(gen);
++        csngen_dump_state(gen, SLAPI_LOG_INFO);
+     }
+ 
+     PR_AtomicDecrement(&s_thread_count);
+ }
++
++int _csngen_tester_state;
++int _csngen_tester_state_rid;
++
++static int
++_mynoise(int time, int len, double height)
++{
++   if (((time/len) % 2) == 0) {
++        return -height + 2 * height * ( time % len ) / (len-1);
++   } else {
++        return height - 2 * height * ( time % len ) / (len-1);
++   }
++}
++
++
++int32_t _csngen_tester_gettime(struct timespec *tp)
++{
++    int vtime = _csngen_tester_state ;
++    tp->tv_sec = 0x1000000 + vtime + 2 * _csngen_tester_state_rid;
++    if (_csngen_tester_state_rid == 3) {
++        /* tp->tv_sec += _mynoise(vtime, 10, 1.5); */
++        tp->tv_sec += _mynoise(vtime, 30, 15);
++    }
++    return 0;
++}
++
++/* Mimic a fully meshed multi suplier topology */
++void csngen_multi_suppliers_test(void)
++{
++#define NB_TEST_MASTERS	6
++#define NB_TEST_STATES	500
++    CSNGen *gen[NB_TEST_MASTERS];
++    struct timespec now = {0};
++    CSN *last_csn = NULL;
++    CSN *csn = NULL;
++    int i,j,rc;
++
++    _csngen_tester_gettime(&now);
++
++    for (i=0; i< NB_TEST_MASTERS; i++) {
++        gen[i] = csngen_new(i+1, NULL);
++        gen[i]->gettime = _csngen_tester_gettime;
++        gen[i]->state.sampled_time = now.tv_sec;
++    }
++
++    for (_csngen_tester_state=0; _csngen_tester_state < NB_TEST_STATES; _csngen_tester_state++) {
++        for (i=0; i< NB_TEST_MASTERS; i++) {
++            _csngen_tester_state_rid = i+1;
++            rc = csngen_new_csn(gen[i], &csn, PR_FALSE);
++            if (rc) {
++                continue;
++            }
++            csngen_dump_state(gen[i], SLAPI_LOG_INFO);
++
++            if (csn_compare(csn, last_csn) <= 0) {
++                slapi_log_err(SLAPI_LOG_ERR, "csngen_multi_suppliers_test",
++                              "CSN generated in disorder state=%d rid=%d\n", _csngen_tester_state, _csngen_tester_state_rid);
++                _csngen_tester_state = NB_TEST_STATES;
++                break;
++            }
++            last_csn = csn;
++
++            for (j=0; j< NB_TEST_MASTERS; j++) {
++                if (i==j) {
++                    continue;
++                }
++                _csngen_tester_state_rid = j+1;
++                rc = csngen_adjust_time(gen[j], csn);
++                if (rc) {
++                    continue;
++                }
++            }
++        }
++    }
++}
+diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
+index 56765fdfb..59c5ec9ab 100644
+--- a/ldap/servers/slapd/slapi-plugin.h
++++ b/ldap/servers/slapd/slapi-plugin.h
+@@ -6762,8 +6762,17 @@ time_t slapi_current_time(void) __attribute__((deprecated));
+  *
+  * \param tp - a timespec struct where the system time is set
+  * \return result code, upon success tp is set to the system time
++ * as a clock in UTC timezone. This clock adjusts with ntp steps,
++ * and should NOT be used for timer information.
+  */
+ int32_t slapi_clock_gettime(struct timespec *tp);
++/* 
++ * slapi_clock_gettime should have better been called
++ * slapi_clock_utc_gettime but sice the function pre-existed
++ * we are just adding an alias (to avoid risking to break
++ * some custom plugins)
++ */
++#define slapi_clock_utc_gettime slapi_clock_gettime
+ 
+ /**
+  * Returns the current system time as a hr clock relative to uptime
+-- 
+2.31.1
+
diff --git a/SOURCES/0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch b/SOURCES/0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch
new file mode 100644
index 0000000..59c4435
--- /dev/null
+++ b/SOURCES/0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch
@@ -0,0 +1,240 @@
+From 957ffd53b041c19d27753a028e6f514dcc75dfbd Mon Sep 17 00:00:00 2001
+From: Simon Pichugin <spichugi@redhat.com>
+Date: Tue, 26 Oct 2021 15:51:24 -0700
+Subject: [PATCH 03/12] Issue 3584 - Fix PBKDF2_SHA256 hashing in FIPS mode
+ (#4949)
+
+Issue Description: Use PK11_Decrypt function to get hash data
+because PK11_ExtractKeyValue function is forbidden in FIPS mode.
+We can't extract keys while in FIPS mode. But we use PK11_ExtractKeyValue
+for hashes, and it's not forbidden.
+
+We can't use OpenSSL's PBKDF2-SHA256 implementation right now because
+we need to support an upgrade procedure while in FIPS mode (update
+hash on bind). For that, we should fix existing PBKDF2 usage, and we can
+switch to OpenSSL's PBKDF2-SHA256 in the following versions.
+
+Fix Description: Use PK11_Decrypt function to get the data.
+
+Enable TLS on all CI test topologies while in FIPS because without
+that we don't set up the NSS database correctly.
+
+Add PBKDF2-SHA256 (OpenSSL) to ldif templates, so the password scheme is
+discoverable by internal functions.
+
+https://github.com/389ds/389-ds-base/issues/3584
+
+Reviewed by: @progier389, @mreynolds389, @Firstyear, @tbordaz (Thanks!!)
+---
+ .../healthcheck/health_security_test.py       | 10 ---
+ ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c  | 62 ++++++++++++++++---
+ ldap/servers/slapd/main.c                     | 12 ++++
+ src/lib389/lib389/__init__.py                 |  4 ++
+ src/lib389/lib389/topologies.py               |  6 +-
+ src/lib389/lib389/utils.py                    | 13 ++++
+ 6 files changed, 86 insertions(+), 21 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
+index 6c0d27aaa..c1dc7938c 100644
+--- a/dirsrvtests/tests/suites/healthcheck/health_security_test.py
++++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
+@@ -40,16 +40,6 @@ else:
+ log = logging.getLogger(__name__)
+ 
+ 
+-def is_fips():
+-    if os.path.exists('/proc/sys/crypto/fips_enabled'):
+-        with open('/proc/sys/crypto/fips_enabled', 'r') as f:
+-            state = f.readline().strip()
+-            if state == '1':
+-                return True
+-            else:
+-                return False
+-
+-
+ def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None):
+     args = FakeArgs()
+     args.instance = instance.serverid
+diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+index d310dc792..dcac4fcdd 100644
+--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
++++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+@@ -91,10 +91,11 @@ pbkdf2_sha256_extract(char *hash_in, SECItem *salt, uint32_t *iterations)
+ SECStatus
+ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *salt, uint32_t iterations)
+ {
+-    SECItem *result = NULL;
+     SECAlgorithmID *algid = NULL;
+     PK11SlotInfo *slot = NULL;
+     PK11SymKey *symkey = NULL;
++    SECItem *wrapKeyData = NULL;
++    SECStatus rv = SECFailure;
+ 
+     /* We assume that NSS is already started. */
+     algid = PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2, SEC_OID_HMAC_SHA256, SEC_OID_HMAC_SHA256, hash_out_len, iterations, salt);
+@@ -104,7 +105,6 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
+         slot = PK11_GetBestSlotMultiple(mechanism_array, 2, NULL);
+         if (slot != NULL) {
+             symkey = PK11_PBEKeyGen(slot, algid, pwd, PR_FALSE, NULL);
+-            PK11_FreeSlot(slot);
+             if (symkey == NULL) {
+                 /* We try to get the Error here but NSS has two or more error interfaces, and sometimes it uses none of them. */
+                 int32_t status = PORT_GetError();
+@@ -123,18 +123,60 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
+         return SECFailure;
+     }
+ 
+-    if (PK11_ExtractKeyValue(symkey) == SECSuccess) {
+-        result = PK11_GetKeyData(symkey);
+-        if (result != NULL && result->len <= hash_out_len) {
+-            memcpy(hash_out, result->data, result->len);
+-            PK11_FreeSymKey(symkey);
++    /*
++     * First, we need to generate a wrapped key for PK11_Decrypt call:
++     * slot is the same slot we used in PK11_PBEKeyGen()
++     * 256 bits / 8 bit per byte
++     */
++    PK11SymKey *wrapKey = PK11_KeyGen(slot, CKM_AES_ECB, NULL, 256/8, NULL);
++    PK11_FreeSlot(slot);
++    if (wrapKey == NULL) {
++        slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to generate a wrapped key.\n");
++        return SECFailure;
++	}
++
++    wrapKeyData = (SECItem *)PORT_Alloc(sizeof(SECItem));
++    /* Align the wrapped key with 32 bytes. */
++    wrapKeyData->len = (PK11_GetKeyLength(symkey) + 31) & ~31;
++    /* Allocate the aligned space for pkc5PBE key plus AESKey block */
++    wrapKeyData->data = (unsigned char *)slapi_ch_calloc(wrapKeyData->len, sizeof(unsigned char));
++
++    /* Get symkey wrapped with wrapKey - required for PK11_Decrypt call */
++    rv = PK11_WrapSymKey(CKM_AES_ECB, NULL, wrapKey, symkey, wrapKeyData);
++    if (rv != SECSuccess) {
++        PK11_FreeSymKey(symkey);
++        PK11_FreeSymKey(wrapKey);
++        SECITEM_FreeItem(wrapKeyData, PR_TRUE);
++        slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to wrap the symkey. (%d)\n", rv);
++        return SECFailure;
++    }
++
++    /* Allocate the space for our result */
++    void *result = (char *)slapi_ch_calloc(wrapKeyData->len, sizeof(char));
++    unsigned int result_len = 0;
++
++    /* User wrapKey to decrypt the wrapped contents.
++     * result is the hash that we need;
++     * result_len is the actual lengh of the data;
++     * has_out_len is the maximum (the space we allocted for hash_out)
++     */
++    rv = PK11_Decrypt(wrapKey, CKM_AES_ECB, NULL, result, &result_len, hash_out_len, wrapKeyData->data, wrapKeyData->len);
++    PK11_FreeSymKey(symkey);
++    PK11_FreeSymKey(wrapKey);
++    SECITEM_FreeItem(wrapKeyData, PR_TRUE);
++
++    if (rv == SECSuccess) {
++        if (result != NULL && result_len <= hash_out_len) {
++            memcpy(hash_out, result, result_len);
++            slapi_ch_free((void **)&result);
+         } else {
+-            PK11_FreeSymKey(symkey);
+-            slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to retrieve (get) hash output.\n");
++            slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to retrieve (get) hash output.\n");
++            slapi_ch_free((void **)&result);
+             return SECFailure;
+         }
+     } else {
+-        slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to extract hash output.\n");
++        slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to extract hash output. (%d)\n", rv);
++        slapi_ch_free((void **)&result);
+         return SECFailure;
+     }
+ 
+diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
+index 61ed40b7d..04d0494f8 100644
+--- a/ldap/servers/slapd/main.c
++++ b/ldap/servers/slapd/main.c
+@@ -2895,9 +2895,21 @@ slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s_por
+      * is enabled or not. We use NSS for random number generation and
+      * other things even if we are not going to accept SSL connections.
+      * We also need NSS for attribute encryption/decryption on import and export.
++     *
++     * It's important to remember that while in FIPS mode the administrator should always enable
++     * the security, otherwise we don't call slapd_pk11_authenticate which is a requirement for FIPS mode
+      */
++    PRBool isFIPS = slapd_pk11_isFIPS();
+     int init_ssl = config_get_security();
+ 
++    if (isFIPS && !init_ssl) {
++        slapi_log_err(SLAPI_LOG_WARNING, "slapd_do_all_nss_ssl_init",
++                      "ERROR: TLS is not enabled, and the machine is in FIPS mode. "
++                      "Some functionality won't work correctly (for example, "
++                      "users with PBKDF2_SHA256 password scheme won't be able to log in). "
++                      "It's highly advisable to enable TLS on this instance.\n");
++    }
++
+     if (slapd_exemode == SLAPD_EXEMODE_SLAPD) {
+         init_ssl = init_ssl && (0 != s_port) && (s_port <= LDAP_PORT_MAX);
+     } else {
+diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
+index 29ee5245a..e0299c5b4 100644
+--- a/src/lib389/lib389/__init__.py
++++ b/src/lib389/lib389/__init__.py
+@@ -1588,6 +1588,10 @@ class DirSrv(SimpleLDAPObject, object):
+         :param post_open: Open the server connection after restart.
+         :type post_open: bool
+         """
++        if self.config.get_attr_val_utf8_l("nsslapd-security") == 'on':
++            self.restart(post_open=post_open)
++            return
++
+         # If it doesn't exist, create a cadb.
+         ssca = NssSsl(dbpath=self.get_ssca_dir())
+         if not ssca._db_exists():
+diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py
+index e9969f524..e7d56582d 100644
+--- a/src/lib389/lib389/topologies.py
++++ b/src/lib389/lib389/topologies.py
+@@ -15,7 +15,7 @@ import socket
+ import pytest
+ 
+ from lib389 import DirSrv
+-from lib389.utils import generate_ds_params
++from lib389.utils import generate_ds_params, is_fips
+ from lib389.mit_krb5 import MitKrb5
+ from lib389.saslmap import SaslMappings
+ from lib389.replica import ReplicationManager, Replicas
+@@ -108,6 +108,10 @@ def _create_instances(topo_dict, suffix):
+             if role == ReplicaRole.HUB:
+                 hs[instance.serverid] = instance
+                 instances.update(hs)
++            # We should always enable TLS while in FIPS mode because otherwise NSS database won't be
++            # configured in a FIPS compliant way
++            if is_fips():
++                instance.enable_tls()
+             log.info("Instance with parameters {} was created.".format(args_instance))
+ 
+     if "standalone1" in instances and len(instances) == 1:
+diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
+index b270784ce..5ba0c6676 100644
+--- a/src/lib389/lib389/utils.py
++++ b/src/lib389/lib389/utils.py
+@@ -1430,3 +1430,16 @@ def is_valid_hostname(hostname):
+         hostname = hostname[:-1] # strip exactly one dot from the right, if present
+     allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
+     return all(allowed.match(x) for x in hostname.split("."))
++
++
++def is_fips():
++    if os.path.exists('/proc/sys/crypto/fips_enabled'):
++        with open('/proc/sys/crypto/fips_enabled', 'r') as f:
++            state = f.readline().strip()
++            if state == '1':
++                return True
++            else:
++                return False
++    else:
++        return False
++
+-- 
+2.31.1
+
diff --git a/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch b/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch
deleted file mode 100644
index 67ccf0c..0000000
--- a/SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch
+++ /dev/null
@@ -1,5307 +0,0 @@
-From eff14f0c884f3d2f541e3be6d9df86087177a76d Mon Sep 17 00:00:00 2001
-From: William Brown <william@blackhats.net.au>
-Date: Mon, 16 Mar 2020 14:59:56 +1000
-Subject: [PATCH 03/12] Ticket 137 - Implement EntryUUID plugin
-
-Bug Description: This implements EntryUUID - A plugin that generates
-uuid's on attributes, which can be used by external applications to
-associate an entry uniquely.
-
-Fix Description: This change is quite large as it contains multiple parts:
-
-* Schema for entryuuid.
-    ldap/schema/02common.ldif
-    ldap/schema/03entryuuid.ldif
-* Documentation of the plugin design
-    src/README.md
-* A rust plugin api.
-    src/slapi_r_plugin/Cargo.toml
-    src/slapi_r_plugin/README.md
-    src/slapi_r_plugin/build.rs
-    src/slapi_r_plugin/src/backend.rs
-    src/slapi_r_plugin/src/ber.rs
-    src/slapi_r_plugin/src/constants.rs
-    src/slapi_r_plugin/src/dn.rs
-    src/slapi_r_plugin/src/entry.rs
-    src/slapi_r_plugin/src/error.rs
-    src/slapi_r_plugin/src/init.c
-    src/slapi_r_plugin/src/lib.rs
-    src/slapi_r_plugin/src/log.rs
-    src/slapi_r_plugin/src/macros.rs
-    src/slapi_r_plugin/src/pblock.rs
-    src/slapi_r_plugin/src/plugin.rs
-    src/slapi_r_plugin/src/search.rs
-    src/slapi_r_plugin/src/syntax_plugin.rs
-    src/slapi_r_plugin/src/task.rs
-    src/slapi_r_plugin/src/value.rs
-* An entry uuid syntax plugin, that has functional indexing
-    src/plugins/entryuuid_syntax/Cargo.toml
-    src/plugins/entryuuid_syntax/src/lib.rs
-* A entry uuid plugin that generates entryuuid's and has a fixup task.
-    src/plugins/entryuuid/Cargo.toml
-    src/plugins/entryuuid/src/lib.rs
-* Supporting changes in the server core to enable and provide apis for the plugins.
-    ldap/servers/slapd/config.c
-    ldap/servers/slapd/entry.c
-    ldap/servers/slapd/fedse.c
-* A test suite for for the entryuuid plugin
-    dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif
-    dirsrvtests/tests/suites/entryuuid/basic_test.py
-* Supporting changes in lib389
-    src/lib389/lib389/_constants.py
-    src/lib389/lib389/backend.py
-    src/lib389/lib389/instance/setup.py
-    src/lib389/lib389/plugins.py
-    src/lib389/lib389/tasks.py
-* Changes to support building the plugins
-    Makefile.am
-    configure.ac
-* Execution of cargo fmt on the tree, causing some clean up of files.
-    src/Cargo.lock
-    src/Cargo.toml
-    src/librnsslapd/build.rs
-    src/librnsslapd/src/lib.rs
-    src/librslapd/Cargo.toml
-    src/librslapd/build.rs
-    src/librslapd/src/lib.rs
-    src/libsds/sds/lib.rs
-    src/libsds/sds/tqueue.rs
-    src/slapd/src/error.rs
-    src/slapd/src/fernet.rs
-    src/slapd/src/lib.rs
-
-https://pagure.io/389-ds-base/issue/137
-
-Author: William Brown <william@blackhats.net.au>
-
-Review by: mreynolds, lkrispenz (Thanks)
----
- Makefile.am                                   |  96 +-
- ...ocalhost-userRoot-2020_03_30_13_14_47.ldif | 233 +++++
- .../tests/suites/entryuuid/basic_test.py      | 226 +++++
- ldap/schema/02common.ldif                     |   1 +
- ldap/schema/03entryuuid.ldif                  |  16 +
- ldap/servers/slapd/config.c                   |  17 +
- ldap/servers/slapd/entry.c                    |  12 +
- ldap/servers/slapd/fedse.c                    |  28 +
- src/Cargo.lock                                | 241 +++--
- src/Cargo.toml                                |  11 +-
- src/README.md                                 |   0
- src/lib389/lib389/_constants.py               |   1 +
- src/lib389/lib389/backend.py                  |   2 +-
- src/lib389/lib389/instance/setup.py           |  14 +
- src/lib389/lib389/plugins.py                  |  30 +
- src/lib389/lib389/tasks.py                    |  14 +
- src/librnsslapd/build.rs                      |  19 +-
- src/librnsslapd/src/lib.rs                    |  16 +-
- src/librslapd/Cargo.toml                      |   4 -
- src/librslapd/build.rs                        |  19 +-
- src/librslapd/src/lib.rs                      |  11 +-
- src/libsds/sds/lib.rs                         |   2 -
- src/libsds/sds/tqueue.rs                      |  23 +-
- src/plugins/entryuuid/Cargo.toml              |  21 +
- src/plugins/entryuuid/src/lib.rs              | 196 ++++
- src/plugins/entryuuid_syntax/Cargo.toml       |  21 +
- src/plugins/entryuuid_syntax/src/lib.rs       | 145 +++
- src/slapd/src/error.rs                        |   2 -
- src/slapd/src/fernet.rs                       |  31 +-
- src/slapd/src/lib.rs                          |   3 -
- src/slapi_r_plugin/Cargo.toml                 |  19 +
- src/slapi_r_plugin/README.md                  | 216 +++++
- src/slapi_r_plugin/build.rs                   |   8 +
- src/slapi_r_plugin/src/backend.rs             |  71 ++
- src/slapi_r_plugin/src/ber.rs                 |  90 ++
- src/slapi_r_plugin/src/constants.rs           | 203 +++++
- src/slapi_r_plugin/src/dn.rs                  | 108 +++
- src/slapi_r_plugin/src/entry.rs               |  92 ++
- src/slapi_r_plugin/src/error.rs               |  61 ++
- src/slapi_r_plugin/src/init.c                 |   8 +
- src/slapi_r_plugin/src/lib.rs                 |  36 +
- src/slapi_r_plugin/src/log.rs                 |  87 ++
- src/slapi_r_plugin/src/macros.rs              | 835 ++++++++++++++++++
- src/slapi_r_plugin/src/pblock.rs              | 275 ++++++
- src/slapi_r_plugin/src/plugin.rs              | 117 +++
- src/slapi_r_plugin/src/search.rs              | 127 +++
- src/slapi_r_plugin/src/syntax_plugin.rs       | 169 ++++
- src/slapi_r_plugin/src/task.rs                | 148 ++++
- src/slapi_r_plugin/src/value.rs               | 235 +++++
- 49 files changed, 4213 insertions(+), 147 deletions(-)
- create mode 100644 dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif
- create mode 100644 dirsrvtests/tests/suites/entryuuid/basic_test.py
- create mode 100644 ldap/schema/03entryuuid.ldif
- create mode 100644 src/README.md
- create mode 100644 src/plugins/entryuuid/Cargo.toml
- create mode 100644 src/plugins/entryuuid/src/lib.rs
- create mode 100644 src/plugins/entryuuid_syntax/Cargo.toml
- create mode 100644 src/plugins/entryuuid_syntax/src/lib.rs
- create mode 100644 src/slapi_r_plugin/Cargo.toml
- create mode 100644 src/slapi_r_plugin/README.md
- create mode 100644 src/slapi_r_plugin/build.rs
- create mode 100644 src/slapi_r_plugin/src/backend.rs
- create mode 100644 src/slapi_r_plugin/src/ber.rs
- create mode 100644 src/slapi_r_plugin/src/constants.rs
- create mode 100644 src/slapi_r_plugin/src/dn.rs
- create mode 100644 src/slapi_r_plugin/src/entry.rs
- create mode 100644 src/slapi_r_plugin/src/error.rs
- create mode 100644 src/slapi_r_plugin/src/init.c
- create mode 100644 src/slapi_r_plugin/src/lib.rs
- create mode 100644 src/slapi_r_plugin/src/log.rs
- create mode 100644 src/slapi_r_plugin/src/macros.rs
- create mode 100644 src/slapi_r_plugin/src/pblock.rs
- create mode 100644 src/slapi_r_plugin/src/plugin.rs
- create mode 100644 src/slapi_r_plugin/src/search.rs
- create mode 100644 src/slapi_r_plugin/src/syntax_plugin.rs
- create mode 100644 src/slapi_r_plugin/src/task.rs
- create mode 100644 src/slapi_r_plugin/src/value.rs
-
-diff --git a/Makefile.am b/Makefile.am
-index 668a095da..627953850 100644
---- a/Makefile.am
-+++ b/Makefile.am
-@@ -38,6 +38,7 @@ if RUST_ENABLE
- RUST_ON = 1
- CARGO_FLAGS = @cargo_defs@
- RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@
-+# -L@abs_top_builddir@/rs/@rust_target_dir@
- RUST_LDFLAGS = -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil
- RUST_DEFINES = -DRUST_ENABLE
- if RUST_ENABLE_OFFLINE
-@@ -298,7 +299,7 @@ clean-local:
- 	-rm -rf $(abs_top_builddir)/html
- 	-rm -rf $(abs_top_builddir)/man/man3
- if RUST_ENABLE
--	CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/libsds/Cargo.toml
-+	CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/Cargo.toml
- endif
- 
- dberrstrs.h: Makefile
-@@ -416,6 +417,11 @@ serverplugin_LTLIBRARIES = libacl-plugin.la \
- 	$(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \
- 	$(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN) $(LIBPOSIX_WINSYNC_PLUGIN)
- 
-+if RUST_ENABLE
-+serverplugin_LTLIBRARIES += libentryuuid-plugin.la libentryuuid-syntax-plugin.la
-+endif
-+
-+
- noinst_LIBRARIES = libavl.a
- 
- dist_noinst_HEADERS = \
-@@ -757,6 +763,10 @@ systemschema_DATA = $(srcdir)/ldap/schema/00core.ldif \
- 	$(srcdir)/ldap/schema/60nss-ldap.ldif \
- 	$(LIBACCTPOLICY_SCHEMA)
- 
-+if RUST_ENABLE
-+systemschema_DATA += $(srcdir)/ldap/schema/03entryuuid.ldif
-+endif
-+
- schema_DATA = $(srcdir)/ldap/schema/99user.ldif
- 
- libexec_SCRIPTS =
-@@ -1227,7 +1237,7 @@ libsds_la_LDFLAGS = $(AM_LDFLAGS) $(SDS_LDFLAGS)
- 
- if RUST_ENABLE
- 
--noinst_LTLIBRARIES = librsds.la librslapd.la librnsslapd.la
-+noinst_LTLIBRARIES = librsds.la librslapd.la librnsslapd.la libentryuuid.la libentryuuid_syntax.la
- 
- ### Why does this exist?
- #
-@@ -1252,6 +1262,8 @@ librsds_la_EXTRA = src/libsds/Cargo.lock
- @abs_top_builddir@/rs/@rust_target_dir@/librsds.a: $(librsds_la_SOURCES)
- 	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
- 	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
-+	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
- 		cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml \
- 		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS)
- 
-@@ -1268,6 +1280,7 @@ librslapd_la_EXTRA = src/librslapd/Cargo.lock
- @abs_top_builddir@/rs/@rust_target_dir@/librslapd.a: $(librslapd_la_SOURCES)
- 	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
- 	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
- 	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
- 		cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml \
- 		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS)
-@@ -1288,6 +1301,7 @@ librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock
- @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a: $(librnsslapd_la_SOURCES)
- 	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
- 	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
- 	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
- 		cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml \
- 		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS)
-@@ -1295,8 +1309,64 @@ librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock
- # The header needs the lib build first.
- rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a
- 
-+libslapi_r_plugin_SOURCES = \
-+	src/slapi_r_plugin/src/backend.rs \
-+	src/slapi_r_plugin/src/ber.rs \
-+	src/slapi_r_plugin/src/constants.rs \
-+	src/slapi_r_plugin/src/dn.rs \
-+	src/slapi_r_plugin/src/entry.rs \
-+	src/slapi_r_plugin/src/error.rs \
-+	src/slapi_r_plugin/src/log.rs \
-+	src/slapi_r_plugin/src/macros.rs \
-+	src/slapi_r_plugin/src/pblock.rs \
-+	src/slapi_r_plugin/src/plugin.rs \
-+	src/slapi_r_plugin/src/search.rs \
-+	src/slapi_r_plugin/src/syntax_plugin.rs \
-+	src/slapi_r_plugin/src/task.rs \
-+	src/slapi_r_plugin/src/value.rs \
-+	src/slapi_r_plugin/src/lib.rs
-+
-+# Build rust ns-slapd components as a library.
-+ENTRYUUID_LIB = @abs_top_builddir@/rs/@rust_target_dir@/libentryuuid.a
-+
-+libentryuuid_la_SOURCES = \
-+	src/plugins/entryuuid/Cargo.toml \
-+	src/plugins/entryuuid/src/lib.rs \
-+	$(libslapi_r_plugin_SOURCES)
-+
-+libentryuuid_la_EXTRA = src/plugin/entryuuid/Cargo.lock
-+
-+@abs_top_builddir@/rs/@rust_target_dir@/libentryuuid.a: $(libentryuuid_la_SOURCES) libslapd.la libentryuuid.la
-+	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
-+	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
-+	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
-+		cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid/Cargo.toml \
-+		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS)
-+	cp $(ENTRYUUID_LIB) @abs_top_builddir@/.libs/libentryuuid.a
-+
-+ENTRYUUID_SYNTAX_LIB = @abs_top_builddir@/rs/@rust_target_dir@/libentryuuid_syntax.a
-+
-+libentryuuid_syntax_la_SOURCES = \
-+	src/plugins/entryuuid_syntax/Cargo.toml \
-+	src/plugins/entryuuid_syntax/src/lib.rs \
-+	$(libslapi_r_plugin_SOURCES)
-+
-+libentryuuid_syntax_la_EXTRA = src/plugin/entryuuid_syntax/Cargo.lock
-+
-+@abs_top_builddir@/rs/@rust_target_dir@/libentryuuid_syntax.a: $(libentryuuid_syntax_la_SOURCES) libslapd.la libentryuuid_syntax.la
-+	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
-+	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
-+	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
-+		cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid_syntax/Cargo.toml \
-+		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS)
-+	cp $(ENTRYUUID_SYNTAX_LIB) @abs_top_builddir@/.libs/libentryuuid_syntax.a
-+
- EXTRA_DIST = $(librsds_la_SOURCES) $(librsds_la_EXTRA) \
- 			$(librslapd_la_SOURCES) $(librslapd_la_EXTRA) \
-+			$(libentryuuid_la_SOURCES) $(libentryuuid_la_EXTRA) \
-+			$(libentryuuid_syntax_la_SOURCES) $(libentryuuid_syntax_la_EXTRA) \
- 			$(librnsslapd_la_SOURCES) $(librnsslapd_la_EXTRA)
- 
- ## Run rust tests
-@@ -1306,13 +1376,17 @@ else
- check-local:
- 	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
- 	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
-+	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
- 		cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml
- 	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
- 	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
- 	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
- 		cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml
- 	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
- 	CARGO_TARGET_DIR=$(abs_top_builddir)/rs \
-+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
- 	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
- 		cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml
- endif
-@@ -1735,6 +1809,24 @@ libderef_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK)
- libderef_plugin_la_DEPENDENCIES = libslapd.la
- libderef_plugin_la_LDFLAGS = -avoid-version
- 
-+if RUST_ENABLE
-+#------------------------
-+# libentryuuid-syntax-plugin
-+#-----------------------
-+libentryuuid_syntax_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c
-+libentryuuid_syntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid_syntax
-+libentryuuid_syntax_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_SYNTAX_LIB)
-+libentryuuid_syntax_plugin_la_LDFLAGS = -avoid-version
-+
-+#------------------------
-+# libentryuuid-plugin
-+#-----------------------
-+libentryuuid_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c
-+libentryuuid_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid
-+libentryuuid_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_LIB)
-+libentryuuid_plugin_la_LDFLAGS = -avoid-version
-+endif
-+
- #------------------------
- # libpbe-plugin
- #-----------------------
-diff --git a/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif
-new file mode 100644
-index 000000000..b64090af7
---- /dev/null
-+++ b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif
-@@ -0,0 +1,233 @@
-+version: 1
-+
-+# entry-id: 1
-+dn: dc=example,dc=com
-+objectClass: top
-+objectClass: domain
-+dc: example
-+description: dc=example,dc=com
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015542Z
-+modifyTimestamp: 20200325015542Z
-+nsUniqueId: a2b33229-6e3b11ea-8de0c78c-83e27eda
-+aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas
-+ s=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search
-+ , compare)(userdn="ldap:///anyone");)
-+aci: (targetattr="ou || objectClass")(targetfilter="(objectClass=organizationa
-+ lUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compa
-+ re)(userdn="ldap:///anyone");)
-+
-+# entry-id: 2
-+dn: cn=389_ds_system,dc=example,dc=com
-+objectClass: top
-+objectClass: nscontainer
-+objectClass: ldapsubentry
-+cn: 389_ds_system
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015542Z
-+modifyTimestamp: 20200325015542Z
-+nsUniqueId: a2b3322a-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 3
-+dn: ou=groups,dc=example,dc=com
-+objectClass: top
-+objectClass: organizationalunit
-+ou: groups
-+aci: (targetattr="cn || member || gidNumber || nsUniqueId || description || ob
-+ jectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enab
-+ le anyone group read"; allow (read, search, compare)(userdn="ldap:///anyone")
-+ ;)
-+aci: (targetattr="member")(targetfilter="(objectClass=groupOfNames)")(version 
-+ 3.0; acl "Enable group_modify to alter members"; allow (write)(groupdn="ldap:
-+ ///cn=group_modify,ou=permissions,dc=example,dc=com");)
-+aci: (targetattr="cn || member || gidNumber || description || objectClass")(ta
-+ rgetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin
-+  to manage groups"; allow (write, add, delete)(groupdn="ldap:///cn=group_admi
-+ n,ou=permissions,dc=example,dc=com");)
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015543Z
-+modifyTimestamp: 20200325015543Z
-+nsUniqueId: a2b3322b-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 4
-+dn: ou=people,dc=example,dc=com
-+objectClass: top
-+objectClass: organizationalunit
-+ou: people
-+aci: (targetattr="objectClass || description || nsUniqueId || uid || displayNa
-+ me || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn ||
-+  memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(tar
-+ getfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user 
-+ read"; allow (read, search, compare)(userdn="ldap:///anyone");)
-+aci: (targetattr="displayName || legalName || userPassword || nsSshPublicKey")
-+ (version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap://
-+ /self");)
-+aci: (targetattr="legalName || telephoneNumber || mobile || sn")(targetfilter=
-+ "(|(objectClass=nsPerson)(objectClass=inetOrgPerson))")(version 3.0; acl "Ena
-+ ble self legalname read"; allow (read, search, compare)(userdn="ldap:///self"
-+ );)
-+aci: (targetattr="legalName || telephoneNumber")(targetfilter="(objectClass=ns
-+ Person)")(version 3.0; acl "Enable user legalname read"; allow (read, search,
-+  compare)(groupdn="ldap:///cn=user_private_read,ou=permissions,dc=example,dc=
-+ com");)
-+aci: (targetattr="uid || description || displayName || loginShell || uidNumber
-+  || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam
-+ e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec
-+ tClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (writ
-+ e, add, delete, read)(groupdn="ldap:///cn=user_admin,ou=permissions,dc=exampl
-+ e,dc=com");)
-+aci: (targetattr="uid || description || displayName || loginShell || uidNumber
-+  || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam
-+ e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec
-+ tClass=nsAccount))")(version 3.0; acl "Enable user modify to change users"; a
-+ llow (write, read)(groupdn="ldap:///cn=user_modify,ou=permissions,dc=example,
-+ dc=com");)
-+aci: (targetattr="userPassword || nsAccountLock || userCertificate || nsSshPub
-+ licKey")(targetfilter="(objectClass=nsAccount)")(version 3.0; acl "Enable use
-+ r password reset"; allow (write, read)(groupdn="ldap:///cn=user_passwd_reset,
-+ ou=permissions,dc=example,dc=com");)
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015543Z
-+modifyTimestamp: 20200325015543Z
-+nsUniqueId: a2b3322c-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 5
-+dn: ou=permissions,dc=example,dc=com
-+objectClass: top
-+objectClass: organizationalunit
-+ou: permissions
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015543Z
-+modifyTimestamp: 20200325015543Z
-+nsUniqueId: a2b3322d-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 6
-+dn: ou=services,dc=example,dc=com
-+objectClass: top
-+objectClass: organizationalunit
-+ou: services
-+aci: (targetattr="objectClass || description || nsUniqueId || cn || memberOf |
-+ | nsAccountLock ")(targetfilter="(objectClass=netscapeServer)")(version 3.0; 
-+ acl "Enable anyone service account read"; allow (read, search, compare)(userd
-+ n="ldap:///anyone");)
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015544Z
-+modifyTimestamp: 20200325015544Z
-+nsUniqueId: a2b3322e-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 7
-+dn: uid=demo_user,ou=people,dc=example,dc=com
-+objectClass: top
-+objectClass: nsPerson
-+objectClass: nsAccount
-+objectClass: nsOrgPerson
-+objectClass: posixAccount
-+uid: demo_user
-+cn: Demo User
-+displayName: Demo User
-+legalName: Demo User Name
-+uidNumber: 99998
-+gidNumber: 99998
-+homeDirectory: /var/empty
-+loginShell: /bin/false
-+nsAccountLock: true
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015544Z
-+modifyTimestamp: 20200325061615Z
-+nsUniqueId: a2b3322f-6e3b11ea-8de0c78c-83e27eda
-+entryUUID: 973e1bbf-ba9c-45d4-b01b-ff7371fd9008
-+
-+# entry-id: 8
-+dn: cn=demo_group,ou=groups,dc=example,dc=com
-+objectClass: top
-+objectClass: groupOfNames
-+objectClass: posixGroup
-+objectClass: nsMemberOf
-+cn: demo_group
-+gidNumber: 99999
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015544Z
-+modifyTimestamp: 20200325015544Z
-+nsUniqueId: a2b33230-6e3b11ea-8de0c78c-83e27eda
-+entryUUID: f6df8fe9-6b30-46aa-aa13-f0bf755371e8
-+
-+# entry-id: 9
-+dn: cn=group_admin,ou=permissions,dc=example,dc=com
-+objectClass: top
-+objectClass: groupOfNames
-+objectClass: nsMemberOf
-+cn: group_admin
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015545Z
-+modifyTimestamp: 20200325015545Z
-+nsUniqueId: a2b33231-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 10
-+dn: cn=group_modify,ou=permissions,dc=example,dc=com
-+objectClass: top
-+objectClass: groupOfNames
-+objectClass: nsMemberOf
-+cn: group_modify
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015545Z
-+modifyTimestamp: 20200325015545Z
-+nsUniqueId: a2b33232-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 11
-+dn: cn=user_admin,ou=permissions,dc=example,dc=com
-+objectClass: top
-+objectClass: groupOfNames
-+objectClass: nsMemberOf
-+cn: user_admin
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015545Z
-+modifyTimestamp: 20200325015545Z
-+nsUniqueId: a2b33233-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 12
-+dn: cn=user_modify,ou=permissions,dc=example,dc=com
-+objectClass: top
-+objectClass: groupOfNames
-+objectClass: nsMemberOf
-+cn: user_modify
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015546Z
-+modifyTimestamp: 20200325015546Z
-+nsUniqueId: a2b33234-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 13
-+dn: cn=user_passwd_reset,ou=permissions,dc=example,dc=com
-+objectClass: top
-+objectClass: groupOfNames
-+objectClass: nsMemberOf
-+cn: user_passwd_reset
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015546Z
-+modifyTimestamp: 20200325015546Z
-+nsUniqueId: a2b33235-6e3b11ea-8de0c78c-83e27eda
-+
-+# entry-id: 14
-+dn: cn=user_private_read,ou=permissions,dc=example,dc=com
-+objectClass: top
-+objectClass: groupOfNames
-+objectClass: nsMemberOf
-+cn: user_private_read
-+creatorsName: cn=Directory Manager
-+modifiersName: cn=Directory Manager
-+createTimestamp: 20200325015547Z
-+modifyTimestamp: 20200325015547Z
-+nsUniqueId: a2b33236-6e3b11ea-8de0c78c-83e27eda
-+
-diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
-new file mode 100644
-index 000000000..beb73701d
---- /dev/null
-+++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
-@@ -0,0 +1,226 @@
-+# --- BEGIN COPYRIGHT BLOCK ---
-+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
-+# All rights reserved.
-+#
-+# License: GPL (version 3 or any later version).
-+# See LICENSE for details.
-+# --- END COPYRIGHT BLOCK ---
-+
-+import ldap
-+import pytest
-+import time
-+import shutil
-+from lib389.idm.user import nsUserAccounts, UserAccounts
-+from lib389.idm.account import Accounts
-+from lib389.topologies import topology_st as topology
-+from lib389.backend import Backends
-+from lib389.paths import Paths
-+from lib389.utils import ds_is_older
-+from lib389._constants import *
-+from lib389.plugins import EntryUUIDPlugin
-+
-+default_paths = Paths()
-+
-+pytestmark = pytest.mark.tier1
-+
-+DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/entryuuid/')
-+IMPORT_UUID_A = "973e1bbf-ba9c-45d4-b01b-ff7371fd9008"
-+UUID_BETWEEN = "eeeeeeee-0000-0000-0000-000000000000"
-+IMPORT_UUID_B = "f6df8fe9-6b30-46aa-aa13-f0bf755371e8"
-+UUID_MIN = "00000000-0000-0000-0000-000000000000"
-+UUID_MAX = "ffffffff-ffff-ffff-ffff-ffffffffffff"
-+
-+def _entryuuid_import_and_search(topology):
-+    # 1
-+    ldif_dir = topology.standalone.get_ldif_dir()
-+    target_ldif = os.path.join(ldif_dir, 'localhost-userRoot-2020_03_30_13_14_47.ldif')
-+    import_ldif = os.path.join(DATADIR1, 'localhost-userRoot-2020_03_30_13_14_47.ldif')
-+    shutil.copyfile(import_ldif, target_ldif)
-+
-+    be = Backends(topology.standalone).get('userRoot')
-+    task = be.import_ldif([target_ldif])
-+    task.wait()
-+    assert(task.is_complete() and task.get_exit_code() == 0)
-+
-+    accounts = Accounts(topology.standalone, DEFAULT_SUFFIX)
-+    # 2 - positive eq test
-+    r2 = accounts.filter("(entryUUID=%s)" % IMPORT_UUID_A)
-+    assert(len(r2) == 1)
-+    r3 = accounts.filter("(entryuuid=%s)" % IMPORT_UUID_B)
-+    assert(len(r3) == 1)
-+    # 3 - negative eq test
-+    r4 = accounts.filter("(entryuuid=%s)" % UUID_MAX)
-+    assert(len(r4) == 0)
-+    # 4 - le search
-+    r5 = accounts.filter("(entryuuid<=%s)" % UUID_BETWEEN)
-+    assert(len(r5) == 1)
-+    # 5 - ge search
-+    r6 = accounts.filter("(entryuuid>=%s)" % UUID_BETWEEN)
-+    assert(len(r6) == 1)
-+    # 6 - le 0 search
-+    r7 = accounts.filter("(entryuuid<=%s)" % UUID_MIN)
-+    assert(len(r7) == 0)
-+    # 7 - ge f search
-+    r8 = accounts.filter("(entryuuid>=%s)" % UUID_MAX)
-+    assert(len(r8) == 0)
-+    # 8 - export db
-+    task = be.export_ldif()
-+    task.wait()
-+    assert(task.is_complete() and task.get_exit_code() == 0)
-+
-+
-+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
-+def test_entryuuid_indexed_import_and_search(topology):
-+    """ Test that an ldif of entries containing entryUUID's can be indexed and searched
-+    correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and
-+    ordering, so we check these are correct.
-+
-+    :id: c98ee6dc-a7ee-4bd4-974d-597ea966dad9
-+
-+    :setup: Standalone instance
-+
-+    :steps:
-+        1. Import the db from the ldif
-+        2. EQ search for an entryuuid (match)
-+        3. EQ search for an entryuuid that does not exist
-+        4. LE search for an entryuuid lower (1 res)
-+        5. GE search for an entryuuid greater (1 res)
-+        6. LE for the 0 uuid (0 res)
-+        7. GE for the f uuid (0 res)
-+        8. export the db to ldif
-+
-+    :expectedresults:
-+        1. Success
-+        2. 1 match
-+        3. 0 match
-+        4. 1 match
-+        5. 1 match
-+        6. 0 match
-+        7. 0 match
-+        8. success
-+    """
-+    # Assert that the index correctly exists.
-+    be = Backends(topology.standalone).get('userRoot')
-+    indexes = be.get_indexes()
-+    indexes.ensure_state(properties={
-+        'cn': 'entryUUID',
-+        'nsSystemIndex': 'false',
-+        'nsIndexType': ['eq', 'pres'],
-+    })
-+    _entryuuid_import_and_search(topology)
-+
-+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
-+def test_entryuuid_unindexed_import_and_search(topology):
-+    """ Test that an ldif of entries containing entryUUID's can be UNindexed searched
-+    correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and
-+    ordering, so we check these are correct.
-+
-+    :id: b652b54d-f009-464b-b5bd-299a33f97243
-+
-+    :setup: Standalone instance
-+
-+    :steps:
-+        1. Import the db from the ldif
-+        2. EQ search for an entryuuid (match)
-+        3. EQ search for an entryuuid that does not exist
-+        4. LE search for an entryuuid lower (1 res)
-+        5. GE search for an entryuuid greater (1 res)
-+        6. LE for the 0 uuid (0 res)
-+        7. GE for the f uuid (0 res)
-+        8. export the db to ldif
-+
-+    :expectedresults:
-+        1. Success
-+        2. 1 match
-+        3. 0 match
-+        4. 1 match
-+        5. 1 match
-+        6. 0 match
-+        7. 0 match
-+        8. success
-+    """
-+    # Assert that the index does NOT exist for this test.
-+    be = Backends(topology.standalone).get('userRoot')
-+    indexes = be.get_indexes()
-+    try:
-+        idx = indexes.get('entryUUID')
-+        idx.delete()
-+    except ldap.NO_SUCH_OBJECT:
-+        # It's already not present, move along, nothing to see here.
-+        pass
-+    _entryuuid_import_and_search(topology)
-+
-+# Test entryUUID generation
-+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
-+def test_entryuuid_generation_on_add(topology):
-+    """ Test that when an entry is added, the entryuuid is added.
-+
-+    :id: a7439b0a-dcee-4cd6-b8ef-771476c0b4f6
-+
-+    :setup: Standalone instance
-+
-+    :steps:
-+        1. Create a new entry in the db
-+        2. Check it has an entry uuid
-+
-+    :expectedresults:
-+        1. Success
-+        2. An entry uuid is present
-+    """
-+    # Step one - create a user!
-+    account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user()
-+    # Step two - does it have an entryuuid?
-+    euuid = account.get_attr_val_utf8('entryUUID')
-+    print(euuid)
-+    assert(euuid is not None)
-+
-+# Test fixup task
-+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
-+def test_entryuuid_fixup_task(topology):
-+    """Test that when an entries without UUID's can have one generated via
-+    the fixup process.
-+
-+    :id: ad42bba2-ffb2-4c22-a37d-cbe7bcf73d6b
-+
-+    :setup: Standalone instance
-+
-+    :steps:
-+        1. Disable the entryuuid plugin
-+        2. Create an entry
-+        3. Enable the entryuuid plugin
-+        4. Run the fixup
-+        5. Assert the entryuuid now exists
-+
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+        5. Suddenly EntryUUID!
-+    """
-+    # 1. Disable the plugin
-+    plug = EntryUUIDPlugin(topology.standalone)
-+    plug.disable()
-+    topology.standalone.restart()
-+
-+    # 2. create the account
-+    account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user(uid=2000)
-+    euuid = account.get_attr_val_utf8('entryUUID')
-+    assert(euuid is None)
-+
-+    # 3. enable the plugin
-+    plug.enable()
-+    topology.standalone.restart()
-+
-+    # 4. run the fix up
-+    # For now set the log level to high!
-+    topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
-+    task = plug.fixup(DEFAULT_SUFFIX)
-+    task.wait()
-+    assert(task.is_complete() and task.get_exit_code() == 0)
-+    topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))
-+
-+    # 5. Assert the uuid.
-+    euuid = account.get_attr_val_utf8('entryUUID')
-+    assert(euuid is not None)
-+
-diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
-index 57e6be3b3..3b0ad0a97 100644
---- a/ldap/schema/02common.ldif
-+++ b/ldap/schema/02common.ldif
-@@ -11,6 +11,7 @@
- #
- # Core schema, highly recommended but not required to start the Directory Server itself.
- #
-+#
- dn: cn=schema
- #
- # attributes
-diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif
-new file mode 100644
-index 000000000..cbde981fe
---- /dev/null
-+++ b/ldap/schema/03entryuuid.ldif
-@@ -0,0 +1,16 @@
-+#
-+# BEGIN COPYRIGHT BLOCK
-+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
-+# All rights reserved.
-+#
-+# License: GPL (version 3 or any later version).
-+# See LICENSE for details.
-+# END COPYRIGHT BLOCK
-+#
-+# Core schema, highly recommended but not required to start the Directory Server itself.
-+#
-+dn: cn=schema
-+#
-+# attributes
-+#
-+attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
-diff --git a/ldap/servers/slapd/config.c b/ldap/servers/slapd/config.c
-index 7e1618e79..bf5476272 100644
---- a/ldap/servers/slapd/config.c
-+++ b/ldap/servers/slapd/config.c
-@@ -35,6 +35,10 @@ extern char *slapd_SSL3ciphers;
- extern char *localuser;
- char *rel2abspath(char *);
- 
-+/*
-+ * WARNING - this can only bootstrap PASSWORD and SYNTAX plugins!
-+ * see fedse.c instead!
-+ */
- static char *bootstrap_plugins[] = {
-     "dn: cn=PBKDF2_SHA256,cn=Password Storage Schemes,cn=plugins,cn=config\n"
-     "objectclass: top\n"
-@@ -45,6 +49,19 @@ static char *bootstrap_plugins[] = {
-     "nsslapd-plugintype: pwdstoragescheme\n"
-     "nsslapd-pluginenabled: on",
- 
-+    "dn: cn=entryuuid_syntax,cn=plugins,cn=config\n"
-+    "objectclass: top\n"
-+    "objectclass: nsSlapdPlugin\n"
-+    "cn: entryuuid_syntax\n"
-+    "nsslapd-pluginpath: libentryuuid-syntax-plugin\n"
-+    "nsslapd-plugininitfunc: entryuuid_syntax_plugin_init\n"
-+    "nsslapd-plugintype: syntax\n"
-+    "nsslapd-pluginenabled: on\n"
-+    "nsslapd-pluginId: entryuuid_syntax\n"
-+    "nsslapd-pluginVersion: none\n"
-+    "nsslapd-pluginVendor: 389 Project\n"
-+    "nsslapd-pluginDescription: entryuuid_syntax\n",
-+
-     NULL
- };
- 
-diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
-index 7697e2b88..9ae9523e2 100644
---- a/ldap/servers/slapd/entry.c
-+++ b/ldap/servers/slapd/entry.c
-@@ -2882,6 +2882,18 @@ slapi_entry_attr_get_bool(const Slapi_Entry *e, const char *type)
-     return slapi_entry_attr_get_bool_ext(e, type, PR_FALSE);
- }
- 
-+const struct slapi_value **
-+slapi_entry_attr_get_valuearray(const Slapi_Entry *e, const char *attrname)
-+{
-+    Slapi_Attr *attr;
-+
-+    if (slapi_entry_attr_find(e, attrname, &attr) != 0) {
-+        return NULL;
-+    }
-+
-+    return attr->a_present_values.va;
-+}
-+
- /*
-  * Extract a single value from an entry (as a string). You do not need
-  * to free the returned string value.
-diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
-index 3b076eb17..0d645f909 100644
---- a/ldap/servers/slapd/fedse.c
-+++ b/ldap/servers/slapd/fedse.c
-@@ -119,6 +119,34 @@ static const char *internal_entries[] =
-         "cn:SNMP\n"
-         "nsSNMPEnabled: on\n",
- 
-+#ifdef RUST_ENABLE
-+        "dn: cn=entryuuid_syntax,cn=plugins,cn=config\n"
-+        "objectclass: top\n"
-+        "objectclass: nsSlapdPlugin\n"
-+        "cn: entryuuid_syntax\n"
-+        "nsslapd-pluginpath: libentryuuid-syntax-plugin\n"
-+        "nsslapd-plugininitfunc: entryuuid_syntax_plugin_init\n"
-+        "nsslapd-plugintype: syntax\n"
-+        "nsslapd-pluginenabled: on\n"
-+        "nsslapd-pluginId: entryuuid_syntax\n"
-+        "nsslapd-pluginVersion: none\n"
-+        "nsslapd-pluginVendor: 389 Project\n"
-+        "nsslapd-pluginDescription: entryuuid_syntax\n",
-+
-+        "dn: cn=entryuuid,cn=plugins,cn=config\n"
-+        "objectclass: top\n"
-+        "objectclass: nsSlapdPlugin\n"
-+        "cn: entryuuid\n"
-+        "nsslapd-pluginpath: libentryuuid-plugin\n"
-+        "nsslapd-plugininitfunc: entryuuid_plugin_init\n"
-+        "nsslapd-plugintype: betxnpreoperation\n"
-+        "nsslapd-pluginenabled: on\n"
-+        "nsslapd-pluginId: entryuuid\n"
-+        "nsslapd-pluginVersion: none\n"
-+        "nsslapd-pluginVendor: 389 Project\n"
-+        "nsslapd-pluginDescription: entryuuid\n",
-+#endif
-+
-         "dn: cn=Password Storage Schemes,cn=plugins,cn=config\n"
-         "objectclass: top\n"
-         "objectclass: nsContainer\n"
-diff --git a/src/Cargo.lock b/src/Cargo.lock
-index ce3c7ed27..33d7b8f23 100644
---- a/src/Cargo.lock
-+++ b/src/Cargo.lock
-@@ -28,12 +28,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
- 
- [[package]]
- name = "base64"
--version = "0.10.1"
-+version = "0.13.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
--dependencies = [
-- "byteorder",
--]
-+checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
- 
- [[package]]
- name = "bitflags"
-@@ -43,9 +40,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
- 
- [[package]]
- name = "byteorder"
--version = "1.4.2"
-+version = "1.4.3"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b"
-+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
- 
- [[package]]
- name = "cbindgen"
-@@ -66,15 +63,12 @@ dependencies = [
- 
- [[package]]
- name = "cc"
--version = "1.0.66"
-+version = "1.0.67"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
--
--[[package]]
--name = "cfg-if"
--version = "0.1.10"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-+checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
-+dependencies = [
-+ "jobserver",
-+]
- 
- [[package]]
- name = "cfg-if"
-@@ -97,16 +91,39 @@ dependencies = [
-  "vec_map",
- ]
- 
-+[[package]]
-+name = "entryuuid"
-+version = "0.1.0"
-+dependencies = [
-+ "cc",
-+ "libc",
-+ "paste",
-+ "slapi_r_plugin",
-+ "uuid",
-+]
-+
-+[[package]]
-+name = "entryuuid_syntax"
-+version = "0.1.0"
-+dependencies = [
-+ "cc",
-+ "libc",
-+ "paste",
-+ "slapi_r_plugin",
-+ "uuid",
-+]
-+
- [[package]]
- name = "fernet"
--version = "0.1.3"
-+version = "0.1.4"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "e7ac567fd75ce6bc28b68e63b5beaa3ce34f56bafd1122f64f8647c822e38a8b"
-+checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
- dependencies = [
-  "base64",
-  "byteorder",
-  "getrandom",
-  "openssl",
-+ "zeroize",
- ]
- 
- [[package]]
-@@ -126,20 +143,20 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
- 
- [[package]]
- name = "getrandom"
--version = "0.1.16"
-+version = "0.2.3"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
-+checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
- dependencies = [
-- "cfg-if 1.0.0",
-+ "cfg-if",
-  "libc",
-  "wasi",
- ]
- 
- [[package]]
- name = "hermit-abi"
--version = "0.1.17"
-+version = "0.1.18"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
-+checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
- dependencies = [
-  "libc",
- ]
-@@ -150,6 +167,15 @@ version = "0.4.7"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
- 
-+[[package]]
-+name = "jobserver"
-+version = "0.1.22"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
-+dependencies = [
-+ "libc",
-+]
-+
- [[package]]
- name = "lazy_static"
- version = "1.4.0"
-@@ -158,9 +184,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
- 
- [[package]]
- name = "libc"
--version = "0.2.82"
-+version = "0.2.94"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929"
-+checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
- 
- [[package]]
- name = "librnsslapd"
-@@ -182,32 +208,38 @@ dependencies = [
- 
- [[package]]
- name = "log"
--version = "0.4.11"
-+version = "0.4.14"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
-+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
- dependencies = [
-- "cfg-if 0.1.10",
-+ "cfg-if",
- ]
- 
-+[[package]]
-+name = "once_cell"
-+version = "1.7.2"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
-+
- [[package]]
- name = "openssl"
--version = "0.10.32"
-+version = "0.10.34"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70"
-+checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
- dependencies = [
-  "bitflags",
-- "cfg-if 1.0.0",
-+ "cfg-if",
-  "foreign-types",
-- "lazy_static",
-  "libc",
-+ "once_cell",
-  "openssl-sys",
- ]
- 
- [[package]]
- name = "openssl-sys"
--version = "0.9.60"
-+version = "0.9.63"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6"
-+checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
- dependencies = [
-  "autocfg",
-  "cc",
-@@ -216,6 +248,25 @@ dependencies = [
-  "vcpkg",
- ]
- 
-+[[package]]
-+name = "paste"
-+version = "0.1.18"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
-+dependencies = [
-+ "paste-impl",
-+ "proc-macro-hack",
-+]
-+
-+[[package]]
-+name = "paste-impl"
-+version = "0.1.18"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
-+dependencies = [
-+ "proc-macro-hack",
-+]
-+
- [[package]]
- name = "pkg-config"
- version = "0.3.19"
-@@ -228,31 +279,36 @@ version = "0.2.10"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
- 
-+[[package]]
-+name = "proc-macro-hack"
-+version = "0.5.19"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-+
- [[package]]
- name = "proc-macro2"
--version = "1.0.24"
-+version = "1.0.27"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
-+checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
- dependencies = [
-  "unicode-xid",
- ]
- 
- [[package]]
- name = "quote"
--version = "1.0.8"
-+version = "1.0.9"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df"
-+checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
- dependencies = [
-  "proc-macro2",
- ]
- 
- [[package]]
- name = "rand"
--version = "0.7.3"
-+version = "0.8.3"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
-+checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
- dependencies = [
-- "getrandom",
-  "libc",
-  "rand_chacha",
-  "rand_core",
-@@ -261,9 +317,9 @@ dependencies = [
- 
- [[package]]
- name = "rand_chacha"
--version = "0.2.2"
-+version = "0.3.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
-+checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
- dependencies = [
-  "ppv-lite86",
-  "rand_core",
-@@ -271,27 +327,30 @@ dependencies = [
- 
- [[package]]
- name = "rand_core"
--version = "0.5.1"
-+version = "0.6.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
-+checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
- dependencies = [
-  "getrandom",
- ]
- 
- [[package]]
- name = "rand_hc"
--version = "0.2.0"
-+version = "0.3.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
-+checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
- dependencies = [
-  "rand_core",
- ]
- 
- [[package]]
- name = "redox_syscall"
--version = "0.1.57"
-+version = "0.2.8"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
-+checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
-+dependencies = [
-+ "bitflags",
-+]
- 
- [[package]]
- name = "remove_dir_all"
-@@ -314,18 +373,18 @@ checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
- 
- [[package]]
- name = "serde"
--version = "1.0.118"
-+version = "1.0.126"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800"
-+checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
- dependencies = [
-  "serde_derive",
- ]
- 
- [[package]]
- name = "serde_derive"
--version = "1.0.118"
-+version = "1.0.126"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df"
-+checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
- dependencies = [
-  "proc-macro2",
-  "quote",
-@@ -334,9 +393,9 @@ dependencies = [
- 
- [[package]]
- name = "serde_json"
--version = "1.0.61"
-+version = "1.0.64"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a"
-+checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
- dependencies = [
-  "itoa",
-  "ryu",
-@@ -350,6 +409,16 @@ dependencies = [
-  "fernet",
- ]
- 
-+[[package]]
-+name = "slapi_r_plugin"
-+version = "0.1.0"
-+dependencies = [
-+ "lazy_static",
-+ "libc",
-+ "paste",
-+ "uuid",
-+]
-+
- [[package]]
- name = "strsim"
- version = "0.8.0"
-@@ -358,22 +427,34 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
- 
- [[package]]
- name = "syn"
--version = "1.0.58"
-+version = "1.0.72"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
-+dependencies = [
-+ "proc-macro2",
-+ "quote",
-+ "unicode-xid",
-+]
-+
-+[[package]]
-+name = "synstructure"
-+version = "0.12.4"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5"
-+checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
- dependencies = [
-  "proc-macro2",
-  "quote",
-+ "syn",
-  "unicode-xid",
- ]
- 
- [[package]]
- name = "tempfile"
--version = "3.1.0"
-+version = "3.2.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
-+checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
- dependencies = [
-- "cfg-if 0.1.10",
-+ "cfg-if",
-  "libc",
-  "rand",
-  "redox_syscall",
-@@ -407,15 +488,24 @@ checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
- 
- [[package]]
- name = "unicode-xid"
--version = "0.2.1"
-+version = "0.2.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
-+checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
-+
-+[[package]]
-+name = "uuid"
-+version = "0.8.2"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
-+dependencies = [
-+ "getrandom",
-+]
- 
- [[package]]
- name = "vcpkg"
--version = "0.2.11"
-+version = "0.2.12"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
-+checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"
- 
- [[package]]
- name = "vec_map"
-@@ -425,9 +515,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
- 
- [[package]]
- name = "wasi"
--version = "0.9.0+wasi-snapshot-preview1"
-+version = "0.10.2+wasi-snapshot-preview1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
-+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
- 
- [[package]]
- name = "winapi"
-@@ -450,3 +540,24 @@ name = "winapi-x86_64-pc-windows-gnu"
- version = "0.4.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-+
-+[[package]]
-+name = "zeroize"
-+version = "1.3.0"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
-+dependencies = [
-+ "zeroize_derive",
-+]
-+
-+[[package]]
-+name = "zeroize_derive"
-+version = "1.1.0"
-+source = "registry+https://github.com/rust-lang/crates.io-index"
-+checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
-+dependencies = [
-+ "proc-macro2",
-+ "quote",
-+ "syn",
-+ "synstructure",
-+]
-diff --git a/src/Cargo.toml b/src/Cargo.toml
-index f6dac010f..1ad2b21b0 100644
---- a/src/Cargo.toml
-+++ b/src/Cargo.toml
-@@ -1,10 +1,13 @@
- 
- [workspace]
- members = [
--	"librslapd",
--	"librnsslapd",
--	"libsds",
--	"slapd",
-+    "librslapd",
-+    "librnsslapd",
-+    "libsds",
-+    "slapd",
-+    "slapi_r_plugin",
-+    "plugins/entryuuid",
-+    "plugins/entryuuid_syntax",
- ]
- 
- [profile.release]
-diff --git a/src/README.md b/src/README.md
-new file mode 100644
-index 000000000..e69de29bb
-diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py
-index 52aac0f21..c184c8d4f 100644
---- a/src/lib389/lib389/_constants.py
-+++ b/src/lib389/lib389/_constants.py
-@@ -150,6 +150,7 @@ DN_IMPORT_TASK = "cn=import,%s" % DN_TASKS
- DN_BACKUP_TASK = "cn=backup,%s" % DN_TASKS
- DN_RESTORE_TASK = "cn=restore,%s" % DN_TASKS
- DN_MBO_TASK = "cn=memberOf task,%s" % DN_TASKS
-+DN_EUUID_TASK = "cn=entryuuid task,%s" % DN_TASKS
- DN_TOMB_FIXUP_TASK = "cn=fixup tombstones,%s" % DN_TASKS
- DN_FIXUP_LINKED_ATTIBUTES = "cn=fixup linked attributes,%s" % DN_TASKS
- DN_AUTOMEMBER_REBUILD_TASK = "cn=automember rebuild membership,%s" % DN_TASKS
-diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
-index aab07c028..bcd7b383f 100644
---- a/src/lib389/lib389/backend.py
-+++ b/src/lib389/lib389/backend.py
-@@ -765,7 +765,7 @@ class Backend(DSLdapObject):
-                 enc_attr.delete()
-                 break
- 
--    def import_ldif(self, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=False, only_core=False,
-+    def import_ldif(self, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=None, only_core=False,
-                     include_suffixes=None, exclude_suffixes=None):
-         """Do an import of the suffix"""
- 
-diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
-index 530fb367a..ac0fe1a8c 100644
---- a/src/lib389/lib389/instance/setup.py
-+++ b/src/lib389/lib389/instance/setup.py
-@@ -34,6 +34,7 @@ from lib389.instance.options import General2Base, Slapd2Base, Backend2Base
- from lib389.paths import Paths
- from lib389.saslmap import SaslMappings
- from lib389.instance.remove import remove_ds_instance
-+from lib389.index import Indexes
- from lib389.utils import (
-     assert_c,
-     is_a_dn,
-@@ -928,6 +929,19 @@ class SetupDs(object):
-         if slapd['self_sign_cert']:
-             ds_instance.config.set('nsslapd-security', 'on')
- 
-+        # Before we create any backends, create any extra default indexes that may be
-+        # dynamicly provisioned, rather than from template-dse.ldif. Looking at you
-+        # entryUUID (requires rust enabled).
-+        #
-+        # Indexes defaults to default_index_dn
-+        indexes = Indexes(ds_instance)
-+        if ds_instance.ds_paths.rust_enabled:
-+            indexes.create(properties={
-+                'cn': 'entryUUID',
-+                'nsSystemIndex': 'false',
-+                'nsIndexType': ['eq', 'pres'],
-+            })
-+
-         # Create the backends as listed
-         # Load example data if needed.
-         for backend in backends:
-diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
-index 16899f6d3..2d88e60bd 100644
---- a/src/lib389/lib389/plugins.py
-+++ b/src/lib389/lib389/plugins.py
-@@ -2244,3 +2244,33 @@ class ContentSyncPlugin(Plugin):
- 
-     def __init__(self, instance, dn="cn=Content Synchronization,cn=plugins,cn=config"):
-         super(ContentSyncPlugin, self).__init__(instance, dn)
-+
-+
-+class EntryUUIDPlugin(Plugin):
-+    """The EntryUUID plugin configuration
-+    :param instance: An instance
-+    :type instance: lib389.DirSrv
-+    :param dn: Entry DN
-+    :type dn: str
-+    """
-+    def __init__(self, instance, dn="cn=entryuuid,cn=plugins,cn=config"):
-+        super(EntryUUIDPlugin, self).__init__(instance, dn)
-+
-+    def fixup(self, basedn, _filter=None):
-+        """Create an entryuuid fixup task
-+
-+        :param basedn: Basedn to fix up
-+        :type basedn: str
-+        :param _filter: a filter for entries to fix up
-+        :type _filter: str
-+
-+        :returns: an instance of Task(DSLdapObject)
-+        """
-+
-+        task = tasks.EntryUUIDFixupTask(self._instance)
-+        task_properties = {'basedn': basedn}
-+        if _filter is not None:
-+            task_properties['filter'] = _filter
-+        task.create(properties=task_properties)
-+
-+        return task
-diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
-index b19e7918d..590c6ee79 100644
---- a/src/lib389/lib389/tasks.py
-+++ b/src/lib389/lib389/tasks.py
-@@ -203,6 +203,20 @@ class USNTombstoneCleanupTask(Task):
-         return super(USNTombstoneCleanupTask, self)._validate(rdn, properties, basedn)
- 
- 
-+class EntryUUIDFixupTask(Task):
-+    """A single instance of memberOf task entry
-+
-+    :param instance: An instance
-+    :type instance: lib389.DirSrv
-+    """
-+
-+    def __init__(self, instance, dn=None):
-+        self.cn = 'entryuuid_fixup_' + Task._get_task_date()
-+        dn = "cn=" + self.cn + "," + DN_EUUID_TASK
-+        super(EntryUUIDFixupTask, self).__init__(instance, dn)
-+        self._must_attributes.extend(['basedn'])
-+
-+
- class SchemaReloadTask(Task):
-     """A single instance of schema reload task entry
- 
-diff --git a/src/librnsslapd/build.rs b/src/librnsslapd/build.rs
-index 9b953b246..13f6d2e03 100644
---- a/src/librnsslapd/build.rs
-+++ b/src/librnsslapd/build.rs
-@@ -3,13 +3,14 @@ extern crate cbindgen;
- use std::env;
- 
- fn main() {
--    let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
--    let out_dir = env::var("SLAPD_HEADER_DIR").unwrap();
--
--    cbindgen::Builder::new()
--        .with_language(cbindgen::Language::C)
--        .with_crate(crate_dir)
--        .generate()
--        .expect("Unable to generate bindings")
--        .write_to_file(format!("{}/rust-nsslapd-private.h", out_dir));
-+    if let Ok(crate_dir) = env::var("CARGO_MANIFEST_DIR") {
-+        if let Ok(out_dir) = env::var("SLAPD_HEADER_DIR") {
-+            cbindgen::Builder::new()
-+                .with_language(cbindgen::Language::C)
-+                .with_crate(crate_dir)
-+                .generate()
-+                .expect("Unable to generate bindings")
-+                .write_to_file(format!("{}/rust-nsslapd-private.h", out_dir));
-+        }
-+    }
- }
-diff --git a/src/librnsslapd/src/lib.rs b/src/librnsslapd/src/lib.rs
-index c5fd2bbaf..dffe4ce1c 100644
---- a/src/librnsslapd/src/lib.rs
-+++ b/src/librnsslapd/src/lib.rs
-@@ -4,9 +4,9 @@
- // Remember this is just a c-bindgen stub, all logic should come from slapd!
- 
- extern crate libc;
--use slapd;
- use libc::c_char;
--use std::ffi::{CString, CStr};
-+use slapd;
-+use std::ffi::{CStr, CString};
- 
- #[no_mangle]
- pub extern "C" fn do_nothing_again_rust() -> usize {
-@@ -29,9 +29,7 @@ pub extern "C" fn fernet_generate_token(dn: *const c_char, raw_key: *const c_cha
-                     // We have to move string memory ownership by copying so the system
-                     // allocator has it.
-                     let raw = tok.into_raw();
--                    let dup_tok = unsafe {
--                        libc::strdup(raw)
--                    };
-+                    let dup_tok = unsafe { libc::strdup(raw) };
-                     unsafe {
-                         CString::from_raw(raw);
-                     };
-@@ -45,7 +43,12 @@ pub extern "C" fn fernet_generate_token(dn: *const c_char, raw_key: *const c_cha
- }
- 
- #[no_mangle]
--pub extern "C" fn fernet_verify_token(dn: *const c_char, token: *const c_char, raw_key: *const c_char, ttl: u64) -> bool {
-+pub extern "C" fn fernet_verify_token(
-+    dn: *const c_char,
-+    token: *const c_char,
-+    raw_key: *const c_char,
-+    ttl: u64,
-+) -> bool {
-     if dn.is_null() || raw_key.is_null() || token.is_null() {
-         return false;
-     }
-@@ -67,4 +70,3 @@ pub extern "C" fn fernet_verify_token(dn: *const c_char, token: *const c_char, r
-         Err(_) => false,
-     }
- }
--
-diff --git a/src/librslapd/Cargo.toml b/src/librslapd/Cargo.toml
-index 1dd715ed2..08309c224 100644
---- a/src/librslapd/Cargo.toml
-+++ b/src/librslapd/Cargo.toml
-@@ -12,10 +12,6 @@ path = "src/lib.rs"
- name = "rslapd"
- crate-type = ["staticlib", "lib"]
- 
--# [profile.release]
--# panic = "abort"
--# lto = true
--
- [dependencies]
- slapd = { path = "../slapd" }
- libc = "0.2"
-diff --git a/src/librslapd/build.rs b/src/librslapd/build.rs
-index 4d4c1ce42..84aff156b 100644
---- a/src/librslapd/build.rs
-+++ b/src/librslapd/build.rs
-@@ -3,13 +3,14 @@ extern crate cbindgen;
- use std::env;
- 
- fn main() {
--    let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
--    let out_dir = env::var("SLAPD_HEADER_DIR").unwrap();
--
--    cbindgen::Builder::new()
--        .with_language(cbindgen::Language::C)
--        .with_crate(crate_dir)
--        .generate()
--        .expect("Unable to generate bindings")
--        .write_to_file(format!("{}/rust-slapi-private.h", out_dir));
-+    if let Ok(crate_dir) = env::var("CARGO_MANIFEST_DIR") {
-+        if let Ok(out_dir) = env::var("SLAPD_HEADER_DIR") {
-+            cbindgen::Builder::new()
-+                .with_language(cbindgen::Language::C)
-+                .with_crate(crate_dir)
-+                .generate()
-+                .expect("Unable to generate bindings")
-+                .write_to_file(format!("{}/rust-slapi-private.h", out_dir));
-+        }
-+    }
- }
-diff --git a/src/librslapd/src/lib.rs b/src/librslapd/src/lib.rs
-index 9cce193a0..cf283a7ce 100644
---- a/src/librslapd/src/lib.rs
-+++ b/src/librslapd/src/lib.rs
-@@ -8,7 +8,7 @@ extern crate libc;
- use slapd;
- 
- use libc::c_char;
--use std::ffi::{CString, CStr};
-+use std::ffi::{CStr, CString};
- 
- #[no_mangle]
- pub extern "C" fn do_nothing_rust() -> usize {
-@@ -18,9 +18,7 @@ pub extern "C" fn do_nothing_rust() -> usize {
- #[no_mangle]
- pub extern "C" fn rust_free_string(s: *mut c_char) {
-     if !s.is_null() {
--        let _ = unsafe {
--            CString::from_raw(s)
--        };
-+        let _ = unsafe { CString::from_raw(s) };
-     }
- }
- 
-@@ -35,9 +33,7 @@ pub extern "C" fn fernet_generate_new_key() -> *mut c_char {
-     match res_key {
-         Ok(key) => {
-             let raw = key.into_raw();
--            let dup_key = unsafe {
--                libc::strdup(raw)
--            };
-+            let dup_key = unsafe { libc::strdup(raw) };
-             rust_free_string(raw);
-             dup_key
-         }
-@@ -53,4 +49,3 @@ pub extern "C" fn fernet_validate_key(raw_key: *const c_char) -> bool {
-         Err(_) => false,
-     }
- }
--
-diff --git a/src/libsds/sds/lib.rs b/src/libsds/sds/lib.rs
-index aa70c7a8e..9e2973222 100644
---- a/src/libsds/sds/lib.rs
-+++ b/src/libsds/sds/lib.rs
-@@ -28,5 +28,3 @@ pub enum sds_result {
-     /// The list is exhausted, no more elements can be returned.
-     ListExhausted = 16,
- }
--
--
-diff --git a/src/libsds/sds/tqueue.rs b/src/libsds/sds/tqueue.rs
-index b7042e514..ebe1f4b6c 100644
---- a/src/libsds/sds/tqueue.rs
-+++ b/src/libsds/sds/tqueue.rs
-@@ -9,8 +9,8 @@
- #![warn(missing_docs)]
- 
- use super::sds_result;
--use std::sync::Mutex;
- use std::collections::LinkedList;
-+use std::sync::Mutex;
- 
- // Borrow from libc
- #[doc(hidden)]
-@@ -75,7 +75,10 @@ impl Drop for TQueue {
- /// C compatible wrapper around the TQueue. Given a valid point, a TQueue pointer
- /// is allocated on the heap and referenced in retq. free_fn_ptr may be NULL
- /// but if it references a function, this will be called during drop of the TQueue.
--pub extern fn sds_tqueue_init(retq: *mut *mut TQueue, free_fn_ptr: Option<extern "C" fn(*const c_void)>) -> sds_result {
-+pub extern "C" fn sds_tqueue_init(
-+    retq: *mut *mut TQueue,
-+    free_fn_ptr: Option<extern "C" fn(*const c_void)>,
-+) -> sds_result {
-     // This piece of type signature magic is because in rust types that extern C,
-     // with option has None resolve to null. What this causes is we can wrap
-     // our fn ptr with Option in rust, but the C side gives us fn ptr or NULL, and
-@@ -93,7 +96,7 @@ pub extern fn sds_tqueue_init(retq: *mut *mut TQueue, free_fn_ptr: Option<extern
- 
- #[no_mangle]
- /// Push an element to the tail of the queue. The element may be NULL
--pub extern fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_result {
-+pub extern "C" fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_result {
-     // Check for null ....
-     unsafe { (*q).enqueue(elem) };
-     sds_result::Success
-@@ -103,29 +106,27 @@ pub extern fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_r
- /// Dequeue from the head of the queue. The result will be placed into elem.
- /// if elem is NULL no dequeue is attempted. If there are no more items
- /// ListExhausted is returned.
--pub extern fn sds_tqueue_dequeue(q: *const TQueue, elem: *mut *const c_void) -> sds_result {
-+pub extern "C" fn sds_tqueue_dequeue(q: *const TQueue, elem: *mut *const c_void) -> sds_result {
-     if elem.is_null() {
-         return sds_result::NullPointer;
-     }
-     match unsafe { (*q).dequeue() } {
-         Some(e) => {
--            unsafe { *elem = e; };
-+            unsafe {
-+                *elem = e;
-+            };
-             sds_result::Success
-         }
--        None => {
--            sds_result::ListExhausted
--        }
-+        None => sds_result::ListExhausted,
-     }
- }
- 
- #[no_mangle]
- /// Free the queue and all remaining elements. After this point it is
- /// not safe to access the queue.
--pub extern fn sds_tqueue_destroy(q: *mut TQueue) -> sds_result {
-+pub extern "C" fn sds_tqueue_destroy(q: *mut TQueue) -> sds_result {
-     // This will drop the queue and free it's content
-     // mem::drop(q);
-     let _q = unsafe { Box::from_raw(q) };
-     sds_result::Success
- }
--
--
-diff --git a/src/plugins/entryuuid/Cargo.toml b/src/plugins/entryuuid/Cargo.toml
-new file mode 100644
-index 000000000..c43d7a771
---- /dev/null
-+++ b/src/plugins/entryuuid/Cargo.toml
-@@ -0,0 +1,21 @@
-+[package]
-+name = "entryuuid"
-+version = "0.1.0"
-+authors = ["William Brown <william@blackhats.net.au>"]
-+edition = "2018"
-+
-+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-+
-+[lib]
-+path = "src/lib.rs"
-+name = "entryuuid"
-+crate-type = ["staticlib", "lib"]
-+
-+[dependencies]
-+libc = "0.2"
-+paste = "0.1"
-+slapi_r_plugin = { path="../../slapi_r_plugin" }
-+uuid = { version = "0.8", features = [ "v4" ] }
-+
-+[build-dependencies]
-+cc = { version = "1.0", features = ["parallel"] }
-diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
-new file mode 100644
-index 000000000..6b5e8d1bb
---- /dev/null
-+++ b/src/plugins/entryuuid/src/lib.rs
-@@ -0,0 +1,196 @@
-+#[macro_use]
-+extern crate slapi_r_plugin;
-+use slapi_r_plugin::prelude::*;
-+use std::convert::{TryFrom, TryInto};
-+use std::os::raw::c_char;
-+use uuid::Uuid;
-+
-+#[derive(Debug)]
-+struct FixupData {
-+    basedn: Sdn,
-+    raw_filter: String,
-+}
-+
-+struct EntryUuid;
-+/*
-+ *                    /---- plugin ident
-+ *                    |          /---- Struct name.
-+ *                    V          V
-+ */
-+slapi_r_plugin_hooks!(entryuuid, EntryUuid);
-+
-+/*
-+ *                             /---- plugin ident
-+ *                             |          /---- cb ident
-+ *                             |          |                   /---- map function
-+ *                             V          V                   V
-+ */
-+slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_mapfn);
-+
-+fn assign_uuid(e: &mut EntryRef) {
-+    let sdn = e.get_sdnref();
-+
-+    // We could consider making these lazy static.
-+    let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn");
-+    let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn");
-+
-+    if sdn.is_below_suffix(&*config_sdn) || sdn.is_below_suffix(&*schema_sdn) {
-+        // We don't need to assign to these suffixes.
-+        log_error!(
-+            ErrorLevel::Trace,
-+            "assign_uuid -> not assigning to {:?} as part of system suffix",
-+            sdn.to_dn_string()
-+        );
-+        return;
-+    }
-+
-+    // Generate a new Uuid.
-+    let u: Uuid = Uuid::new_v4();
-+    log_error!(
-+        ErrorLevel::Trace,
-+        "assign_uuid -> assigning {:?} to dn {}",
-+        u,
-+        sdn.to_dn_string()
-+    );
-+
-+    let uuid_value = Value::from(&u);
-+
-+    // Add it to the entry
-+    e.add_value("entryUUID", &uuid_value);
-+}
-+
-+impl SlapiPlugin3 for EntryUuid {
-+    // Indicate we have pre add
-+    fn has_betxn_pre_add() -> bool {
-+        true
-+    }
-+
-+    fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
-+        log_error!(ErrorLevel::Trace, "betxn_pre_add");
-+
-+        let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
-+        assign_uuid(&mut e);
-+
-+        Ok(())
-+    }
-+
-+    fn has_task_handler() -> Option<&'static str> {
-+        Some("entryuuid task")
-+    }
-+
-+    type TaskData = FixupData;
-+
-+    fn task_validate(e: &EntryRef) -> Result<Self::TaskData, LDAPError> {
-+        // Does the entry have what we need?
-+        let basedn: Sdn = match e.get_attr("basedn") {
-+            Some(values) => values
-+                .first()
-+                .ok_or_else(|| {
-+                    log_error!(
-+                        ErrorLevel::Trace,
-+                        "task_validate basedn error -> empty value array?"
-+                    );
-+                    LDAPError::Operation
-+                })?
-+                .as_ref()
-+                .try_into()
-+                .map_err(|e| {
-+                    log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e);
-+                    LDAPError::Operation
-+                })?,
-+            None => return Err(LDAPError::ObjectClassViolation),
-+        };
-+
-+        let raw_filter: String = match e.get_attr("filter") {
-+            Some(values) => values
-+                .first()
-+                .ok_or_else(|| {
-+                    log_error!(
-+                        ErrorLevel::Trace,
-+                        "task_validate filter error -> empty value array?"
-+                    );
-+                    LDAPError::Operation
-+                })?
-+                .as_ref()
-+                .try_into()
-+                .map_err(|e| {
-+                    log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e);
-+                    LDAPError::Operation
-+                })?,
-+            None => {
-+                // Give a default filter.
-+                "(objectClass=*)".to_string()
-+            }
-+        };
-+
-+        // Error if the first filter is empty?
-+
-+        // Now, to make things faster, we wrap the filter in a exclude term.
-+        let raw_filter = format!("(&{}(!(entryuuid=*)))", raw_filter);
-+
-+        Ok(FixupData { basedn, raw_filter })
-+    }
-+
-+    fn task_be_dn_hint(data: &Self::TaskData) -> Option<Sdn> {
-+        Some(data.basedn.clone())
-+    }
-+
-+    fn task_handler(_task: &Task, data: Self::TaskData) -> Result<Self::TaskData, PluginError> {
-+        log_error!(
-+            ErrorLevel::Trace,
-+            "task_handler -> start thread with -> {:?}",
-+            data
-+        );
-+
-+        let search = Search::new_map_entry(
-+            &(*data.basedn),
-+            SearchScope::Subtree,
-+            &data.raw_filter,
-+            plugin_id(),
-+            &(),
-+            entryuuid_fixup_cb,
-+        )
-+        .map_err(|e| {
-+            log_error!(
-+                ErrorLevel::Error,
-+                "task_handler -> Unable to construct search -> {:?}",
-+                e
-+            );
-+            e
-+        })?;
-+
-+        match search.execute() {
-+            Ok(_) => {
-+                log_error!(ErrorLevel::Info, "task_handler -> fixup complete, success!");
-+                Ok(data)
-+            }
-+            Err(e) => {
-+                // log, and return
-+                log_error!(
-+                    ErrorLevel::Error,
-+                    "task_handler -> fixup complete, failed -> {:?}",
-+                    e
-+                );
-+                Err(PluginError::GenericFailure)
-+            }
-+        }
-+    }
-+
-+    fn start(_pb: &mut PblockRef) -> Result<(), PluginError> {
-+        log_error!(ErrorLevel::Trace, "plugin start");
-+        Ok(())
-+    }
-+
-+    fn close(_pb: &mut PblockRef) -> Result<(), PluginError> {
-+        log_error!(ErrorLevel::Trace, "plugin close");
-+        Ok(())
-+    }
-+}
-+
-+pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> {
-+    assign_uuid(&mut e);
-+    Ok(())
-+}
-+
-+#[cfg(test)]
-+mod tests {}
-diff --git a/src/plugins/entryuuid_syntax/Cargo.toml b/src/plugins/entryuuid_syntax/Cargo.toml
-new file mode 100644
-index 000000000..f7d3d64c9
---- /dev/null
-+++ b/src/plugins/entryuuid_syntax/Cargo.toml
-@@ -0,0 +1,21 @@
-+[package]
-+name = "entryuuid_syntax"
-+version = "0.1.0"
-+authors = ["William Brown <william@blackhats.net.au>"]
-+edition = "2018"
-+
-+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-+
-+[lib]
-+path = "src/lib.rs"
-+name = "entryuuid_syntax"
-+crate-type = ["staticlib", "lib"]
-+
-+[dependencies]
-+libc = "0.2"
-+paste = "0.1"
-+slapi_r_plugin = { path="../../slapi_r_plugin" }
-+uuid = { version = "0.8", features = [ "v4" ] }
-+
-+[build-dependencies]
-+cc = { version = "1.0", features = ["parallel"] }
-diff --git a/src/plugins/entryuuid_syntax/src/lib.rs b/src/plugins/entryuuid_syntax/src/lib.rs
-new file mode 100644
-index 000000000..0a4b89f16
---- /dev/null
-+++ b/src/plugins/entryuuid_syntax/src/lib.rs
-@@ -0,0 +1,145 @@
-+#[macro_use]
-+extern crate slapi_r_plugin;
-+use slapi_r_plugin::prelude::*;
-+use std::cmp::Ordering;
-+use std::convert::TryInto;
-+use uuid::Uuid;
-+
-+struct EntryUuidSyntax;
-+
-+// https://tools.ietf.org/html/rfc4530
-+
-+slapi_r_syntax_plugin_hooks!(entryuuid_syntax, EntryUuidSyntax);
-+
-+impl SlapiSyntaxPlugin1 for EntryUuidSyntax {
-+    fn attr_oid() -> &'static str {
-+        "1.3.6.1.1.16.1"
-+    }
-+
-+    fn attr_compat_oids() -> Vec<&'static str> {
-+        Vec::new()
-+    }
-+
-+    fn attr_supported_names() -> Vec<&'static str> {
-+        vec!["1.3.6.1.1.16.1", "UUID"]
-+    }
-+
-+    fn syntax_validate(bval: &BerValRef) -> Result<(), PluginError> {
-+        let r: Result<Uuid, PluginError> = bval.try_into();
-+        r.map(|_| ())
-+    }
-+
-+    fn eq_mr_oid() -> &'static str {
-+        "1.3.6.1.1.16.2"
-+    }
-+
-+    fn eq_mr_name() -> &'static str {
-+        "UUIDMatch"
-+    }
-+
-+    fn eq_mr_desc() -> &'static str {
-+        "UUIDMatch matching rule."
-+    }
-+
-+    fn eq_mr_supported_names() -> Vec<&'static str> {
-+        vec!["1.3.6.1.1.16.2", "uuidMatch", "UUIDMatch"]
-+    }
-+
-+    fn filter_ava_eq(
-+        _pb: &mut PblockRef,
-+        bval_filter: &BerValRef,
-+        vals: &ValueArrayRef,
-+    ) -> Result<bool, PluginError> {
-+        let u = match bval_filter.try_into() {
-+            Ok(u) => u,
-+            Err(_e) => return Ok(false),
-+        };
-+
-+        let r = vals.iter().fold(false, |acc, va| {
-+            if acc {
-+                acc
-+            } else {
-+                // is u in va?
-+                log_error!(ErrorLevel::Trace, "filter_ava_eq debug -> {:?}", va);
-+                let res: Result<Uuid, PluginError> = (&*va).try_into();
-+                match res {
-+                    Ok(vu) => vu == u,
-+                    Err(_) => acc,
-+                }
-+            }
-+        });
-+        log_error!(ErrorLevel::Trace, "filter_ava_eq result -> {:?}", r);
-+        Ok(r)
-+    }
-+
-+    fn eq_mr_filter_values2keys(
-+        _pb: &mut PblockRef,
-+        vals: &ValueArrayRef,
-+    ) -> Result<ValueArray, PluginError> {
-+        vals.iter()
-+            .map(|va| {
-+                let u: Uuid = (&*va).try_into()?;
-+                Ok(Value::from(&u))
-+            })
-+            .collect()
-+    }
-+}
-+
-+impl SlapiSubMr for EntryUuidSyntax {}
-+
-+impl SlapiOrdMr for EntryUuidSyntax {
-+    fn ord_mr_oid() -> Option<&'static str> {
-+        Some("1.3.6.1.1.16.3")
-+    }
-+
-+    fn ord_mr_name() -> &'static str {
-+        "UUIDOrderingMatch"
-+    }
-+
-+    fn ord_mr_desc() -> &'static str {
-+        "UUIDMatch matching rule."
-+    }
-+
-+    fn ord_mr_supported_names() -> Vec<&'static str> {
-+        vec!["1.3.6.1.1.16.3", "uuidOrderingMatch", "UUIDOrderingMatch"]
-+    }
-+
-+    fn filter_ava_ord(
-+        _pb: &mut PblockRef,
-+        bval_filter: &BerValRef,
-+        vals: &ValueArrayRef,
-+    ) -> Result<Option<Ordering>, PluginError> {
-+        let u: Uuid = match bval_filter.try_into() {
-+            Ok(u) => u,
-+            Err(_e) => return Ok(None),
-+        };
-+
-+        let r = vals.iter().fold(None, |acc, va| {
-+            if acc.is_some() {
-+                acc
-+            } else {
-+                // is u in va?
-+                log_error!(ErrorLevel::Trace, "filter_ava_ord debug -> {:?}", va);
-+                let res: Result<Uuid, PluginError> = (&*va).try_into();
-+                match res {
-+                    Ok(vu) => {
-+                        // 1.partial_cmp(2) => ordering::less
-+                        vu.partial_cmp(&u)
-+                    }
-+                    Err(_) => acc,
-+                }
-+            }
-+        });
-+        log_error!(ErrorLevel::Trace, "filter_ava_ord result -> {:?}", r);
-+        Ok(r)
-+    }
-+
-+    fn filter_compare(a: &BerValRef, b: &BerValRef) -> Ordering {
-+        let ua: Uuid = a.try_into().expect("An invalid value a was given!");
-+        let ub: Uuid = b.try_into().expect("An invalid value b was given!");
-+        ua.cmp(&ub)
-+    }
-+}
-+
-+#[cfg(test)]
-+mod tests {}
-diff --git a/src/slapd/src/error.rs b/src/slapd/src/error.rs
-index 06ddb27b4..6f4d782ee 100644
---- a/src/slapd/src/error.rs
-+++ b/src/slapd/src/error.rs
-@@ -1,8 +1,6 @@
--
- pub enum SlapdError {
-     // This occurs when a string contains an inner null byte
-     // that cstring can't handle.
-     CStringInvalidError,
-     FernetInvalidKey,
- }
--
-diff --git a/src/slapd/src/fernet.rs b/src/slapd/src/fernet.rs
-index fcbd873f8..1a3251fd9 100644
---- a/src/slapd/src/fernet.rs
-+++ b/src/slapd/src/fernet.rs
-@@ -1,39 +1,30 @@
- // Routines for managing fernet encryption
- 
--use std::ffi::{CString, CStr};
--use fernet::Fernet;
- use crate::error::SlapdError;
-+use fernet::Fernet;
-+use std::ffi::{CStr, CString};
- 
- pub fn generate_new_key() -> Result<CString, SlapdError> {
-     let k = Fernet::generate_key();
--    CString::new(k)
--        .map_err(|_| {
--            SlapdError::CStringInvalidError
--        })
-+    CString::new(k).map_err(|_| SlapdError::CStringInvalidError)
- }
- 
- pub fn new(c_str_key: &CStr) -> Result<Fernet, SlapdError> {
--    let str_key = c_str_key.to_str()
-+    let str_key = c_str_key
-+        .to_str()
-         .map_err(|_| SlapdError::CStringInvalidError)?;
--    Fernet::new(str_key)
--        .ok_or(SlapdError::FernetInvalidKey)
-+    Fernet::new(str_key).ok_or(SlapdError::FernetInvalidKey)
- }
- 
- pub fn encrypt(fernet: &Fernet, dn: &CStr) -> Result<CString, SlapdError> {
-     let tok = fernet.encrypt(dn.to_bytes());
--    CString::new(tok)
--        .map_err(|_| {
--            SlapdError::CStringInvalidError
--        })
-+    CString::new(tok).map_err(|_| SlapdError::CStringInvalidError)
- }
- 
- pub fn decrypt(fernet: &Fernet, tok: &CStr, ttl: u64) -> Result<CString, SlapdError> {
--    let s = tok.to_str()
--        .map_err(|_| SlapdError::CStringInvalidError)?;
--    let r: Vec<u8> = fernet.decrypt_with_ttl(s, ttl)
-+    let s = tok.to_str().map_err(|_| SlapdError::CStringInvalidError)?;
-+    let r: Vec<u8> = fernet
-+        .decrypt_with_ttl(s, ttl)
-         .map_err(|_| SlapdError::FernetInvalidKey)?;
--    CString::new(r)
--        .map_err(|_| SlapdError::CStringInvalidError)
-+    CString::new(r).map_err(|_| SlapdError::CStringInvalidError)
- }
--
--
-diff --git a/src/slapd/src/lib.rs b/src/slapd/src/lib.rs
-index 5b1f20368..79f1600c2 100644
---- a/src/slapd/src/lib.rs
-+++ b/src/slapd/src/lib.rs
-@@ -1,5 +1,2 @@
--
- pub mod error;
- pub mod fernet;
--
--
-diff --git a/src/slapi_r_plugin/Cargo.toml b/src/slapi_r_plugin/Cargo.toml
-new file mode 100644
-index 000000000..c7958671a
---- /dev/null
-+++ b/src/slapi_r_plugin/Cargo.toml
-@@ -0,0 +1,19 @@
-+[package]
-+name = "slapi_r_plugin"
-+version = "0.1.0"
-+authors = ["William Brown <william@blackhats.net.au>"]
-+edition = "2018"
-+build = "build.rs"
-+
-+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-+
-+[lib]
-+path = "src/lib.rs"
-+name = "slapi_r_plugin"
-+crate-type = ["staticlib", "lib"]
-+
-+[dependencies]
-+libc = "0.2"
-+paste = "0.1"
-+lazy_static = "1.4"
-+uuid = { version = "0.8", features = [ "v4" ] }
-diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md
-new file mode 100644
-index 000000000..af9743ec9
---- /dev/null
-+++ b/src/slapi_r_plugin/README.md
-@@ -0,0 +1,216 @@
-+
-+# Slapi R(ust) Plugin Bindings
-+
-+If you are here, you are probably interested in the Rust bindings that allow plugins to be written
-+in Rust for the 389 Directory Server project. If you are, you should use `cargo doc --workspace --no-deps`
-+in `src`, as this contains the material you want for implementing safe plugins.
-+
-+This readme is intended for developers of the bindings that enable those plugins to work.
-+
-+As such it likely requires that you have an understanding both of C and
-+the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html)
-+
-+> **WARNING** This place is not a place of honor ... no highly esteemed deed is commemorated here
-+> ... nothing valued is here. What is here is dangerous and repulsive to us. This message is a
-+> warning about danger.
-+
-+This document will not detail the specifics of unsafe or the invariants you must adhere to for rust
-+to work with C.
-+
-+If you still want to see more about the plugin bindings, go on ...
-+
-+## The Challenge
-+
-+Rust is a memory safe language - that means you may not dereference pointers or alter or interact
-+with uninitialised memory. There are whole classes of problems that this resolves, but it means
-+that Rust is opiniated about how it interacts with memory.
-+
-+C is an unsafe language - there are undefined behaviours all through out the specification, memory
-+can be interacted with without bounds which leads to many kinds of issues ranging from crashes,
-+silent data corruption, to code execution and explotation.
-+
-+While it would be nice to rewrite everything from C to Rust, this is a large task - instead we need
-+a way to allow Rust and C to interact.
-+
-+## The Goal
-+
-+To be able to define, a pure Rust, 100% safe (in rust terms) plugin for 389 Directory Server that
-+can perform useful tasks.
-+
-+## The 389 Directory Server Plugin API
-+
-+The 389-ds plugin system works by reading an ldap entry from cn=config, that directs to a shared
-+library. That shared library path is dlopened and an init symbol read and activated. At that
-+point the plugin is able to call-back into 389-ds to provide registration of function handlers for
-+various tasks that the plugin may wish to perform at defined points in a operations execution.
-+
-+During the execution of a plugin callback, the context of the environment is passed through a
-+parameter block (pblock). This pblock has a set of apis for accessing it's content, which may
-+or may not be defined based on the execution state of the server.
-+
-+Common plugin tasks involve the transformation of entries during write operation paths to provide
-+extra attributes to the entry or generation of other entries. Values in entries are represented by
-+internal structures that may or may not have sorting of content.
-+
-+Already at this point it can be seen there is a lot of surface area to access. For clarity in
-+our trivial example here we have required:
-+
-+* Pblock
-+* Entry
-+* ValueSet
-+* Value
-+* Sdn
-+* Result Codes
-+
-+We need to be able to interact with all of these - and more - to make useful plugins.
-+
-+## Structure of the Rust Plugin bindings.
-+
-+As a result, there are a number of items we must be able to implement:
-+
-+* Creation of the plugin function callback points
-+* Transformation of C pointer types into Rust structures that can be interacted with.
-+* Ability to have Rust interact with structures to achieve side effects in the C server
-+* Mapping of errors that C can understand
-+* Make all of it safe.
-+
-+In order to design this, it's useful to see what a plugin from Rust should look like - by designing
-+what the plugin should look like, we make the bindings that are preferable and ergonomic to rust
-+rather than compromising on quality and developer experience.
-+
-+Here is a minimal example of a plugin - it may not compile or be complete, it serves as an
-+example.
-+
-+```
-+#[macro_use]
-+extern crate slapi_r_plugin;
-+use slapi_r_plugin::prelude::*;
-+
-+struct NewPlugin;
-+
-+slapi_r_plugin_hooks!(plugin_name, NewPlugin);
-+
-+impl SlapiPlugin3 for NewPlugin {
-+    fn start(_pb: &mut PblockRef) -> Result<(), PluginError> {
-+        log_error!(ErrorLevel::Trace, "plugin start");
-+        Ok(())
-+    }
-+
-+    fn close(_pb: &mut PblockRef) -> Result<(), PluginError> {
-+        log_error!(ErrorLevel::Trace, "plugin close");
-+        Ok(())
-+    }
-+
-+    fn has_betxn_pre_add() -> bool {
-+        true
-+    }
-+
-+    fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
-+        let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
-+        let sdn = e.get_sdnref();
-+
-+        log_error!(ErrorLevel::Trace, "betxn_pre_add -> {:?}", sdn);
-+        Ok(())
-+    }
-+}
-+```
-+
-+Important details - there is no unsafe, we use rust native error handling and functions, there
-+is no indication of memory management, we are defined by a trait, error logging uses native
-+formatting. There are probably other details too - I'll leave it as an exercise for the reader
-+to play Where's Wally and find them all.
-+
-+With the end goal in mind, we can begin to look at the construction of the plugin system, and
-+the design choices that were made.
-+
-+## The Plugin Trait
-+
-+A significant choice was the use of a trait to define the possible plugin function operations
-+for rust implementors. This allows the compiler to guarantee that a plugin *will* have all
-+associated functions.
-+
-+> Traits are synonomous with java interfaces, defining methods you "promise" to implement, unlike
-+> object orientation with a class hierarchy.
-+
-+Now, you may notice that not all members of the trait are implemented. This is due to a feature
-+of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide
-+template versions of these functions. If you "overwrite" them, your implementation is used. Unlike
-+OO, you may not inherit or call the default function. 
-+
-+If a default is not provided you *must* implement that function to be considered valid. Today (20200422)
-+this only applies to `start` and `close`.
-+
-+The default implementations all return "false" to the presence of callbacks, and if they are used,
-+they will always return an error.
-+
-+## Interface generation
-+
-+While it is nice to have this Rust interface for plugins, C is unable to call it (Rust uses a different
-+stack calling syntax to C, as well as symbol mangaling). To expose these, we must provide `extern C`
-+functions, where any function that requires a static symbol must be marked as no_mangle.
-+
-+Rather than ask all plugin authors to do this, we can use the rust macro system to generate these
-+interfaces at compile time. This is the reason for this line:
-+
-+```
-+slapi_r_plugin_hooks!(plugin_name, NewPlugin);
-+```
-+
-+This macro is defined in src/macros.rs, and is "the bridge" from C to Rust. Given a plugin name
-+and a struct of the trait SlapiPlugin3, this macro is able to generate all needed C compatible
-+functions. Based on the calls to `has_<op_type>`, the generated functions are registered to the pblock
-+that is provided.
-+
-+When a call back triggers, the function landing point is called. This then wraps all the pointer
-+types from C into Rust structs, and then dispatches to the struct instance.
-+
-+When the struct function returns, the result is unpacked and turned into C compatible result codes -
-+in some cases, the result codes are sanitised due to quirks in the C ds api - `[<$mod_ident _plugin_mr_filter_ava>]`
-+is an excellent example of this, where Rust returns are `true`/`false`, which would normally
-+be FFI safe to convert to 1/0 respectively, but 389-ds expects the inverse in this case, where
-+0 is true and all other values are false. To present a sane api to rust, the macro layer does this
-+(mind bending) transformation for us.
-+
-+## C Ptr Wrapper types
-+
-+This is likely the major, and important detail of the plugin api. By wrapping these C ptrs with
-+Rust types, we can create types that perform as rust expects, and adheres to the invariants required,
-+while providing safe - and useful interfaces to users.
-+
-+It's important to understand how Rust manages memory both on the stack and the heap - Please see
-+[the Rust Book](https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html) for more.
-+
-+As a result, this means that we must express in code, assertions about the proper ownership of memory
-+and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible
-+for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or
-+*hand waving* magical failures that are eXtReMeLy FuN to debug.
-+
-+### Reference Types
-+
-+There are a number of types, such as `SdnRef`, which have a suffix of `*Ref`. These types represent
-+values whos content is owned by the C server - that is, it is the responsibility of 389-ds to free
-+the content of the Pointer once it has been used. A majority of values that are provided to the
-+function callback points fall into this class.
-+
-+### Owned Types
-+
-+These types contain a pointer from the C server, but it is the responsibility of the Rust library
-+to indicate when that pointer and it's content should be disposed of. This is generally handled
-+by the `drop` trait, which is executed ... well, when an item is dropped.
-+
-+### Dispatch from the wrapper to C
-+
-+When a rust function against a wrapper is called, the type internally accesses it Ref type and
-+uses the ptr to dispatch into the C server. Any required invariants are upheld, and results are
-+mapped as required to match what rust callers expect.
-+
-+As a result, this involves horrendous amounts of unsafe, and a detailed analysis of both the DS C
-+api, what it expects, and the Rust nomicon to ensure you maintain all the invariants.
-+
-+## Conclusion
-+
-+Providing a bridge between C and Rust is challenging - but achievable - the result is plugins that
-+are clean, safe, efficent.
-+
-+
-+
-diff --git a/src/slapi_r_plugin/build.rs b/src/slapi_r_plugin/build.rs
-new file mode 100644
-index 000000000..29bbd52d4
---- /dev/null
-+++ b/src/slapi_r_plugin/build.rs
-@@ -0,0 +1,8 @@
-+use std::env;
-+
-+fn main() {
-+    if let Ok(lib_dir) = env::var("SLAPD_DYLIB_DIR") {
-+        println!("cargo:rustc-link-lib=dylib=slapd");
-+        println!("cargo:rustc-link-search=native={}", lib_dir);
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/backend.rs b/src/slapi_r_plugin/src/backend.rs
-new file mode 100644
-index 000000000..f308295aa
---- /dev/null
-+++ b/src/slapi_r_plugin/src/backend.rs
-@@ -0,0 +1,71 @@
-+use crate::dn::SdnRef;
-+use crate::pblock::Pblock;
-+// use std::ops::Deref;
-+
-+extern "C" {
-+    fn slapi_back_transaction_begin(pb: *const libc::c_void) -> i32;
-+    fn slapi_back_transaction_commit(pb: *const libc::c_void);
-+    fn slapi_back_transaction_abort(pb: *const libc::c_void);
-+    fn slapi_be_select_exact(sdn: *const libc::c_void) -> *const libc::c_void;
-+}
-+
-+pub struct BackendRef {
-+    raw_be: *const libc::c_void,
-+}
-+
-+impl BackendRef {
-+    pub fn new(dn: &SdnRef) -> Result<Self, ()> {
-+        let raw_be = unsafe { slapi_be_select_exact(dn.as_ptr()) };
-+        if raw_be.is_null() {
-+            Err(())
-+        } else {
-+            Ok(BackendRef { raw_be })
-+        }
-+    }
-+
-+    pub(crate) fn as_ptr(&self) -> *const libc::c_void {
-+        self.raw_be
-+    }
-+
-+    pub fn begin_txn(self) -> Result<BackendRefTxn, ()> {
-+        let mut pb = Pblock::new();
-+        if pb.set_op_backend(&self) != 0 {
-+            return Err(());
-+        }
-+        let rc = unsafe { slapi_back_transaction_begin(pb.as_ptr()) };
-+        if rc != 0 {
-+            Err(())
-+        } else {
-+            Ok(BackendRefTxn {
-+                pb,
-+                be: self,
-+                committed: false,
-+            })
-+        }
-+    }
-+}
-+
-+pub struct BackendRefTxn {
-+    pb: Pblock,
-+    be: BackendRef,
-+    committed: bool,
-+}
-+
-+impl BackendRefTxn {
-+    pub fn commit(mut self) {
-+        self.committed = true;
-+        unsafe {
-+            slapi_back_transaction_commit(self.pb.as_ptr());
-+        }
-+    }
-+}
-+
-+impl Drop for BackendRefTxn {
-+    fn drop(&mut self) {
-+        if self.committed == false {
-+            unsafe {
-+                slapi_back_transaction_abort(self.pb.as_ptr());
-+            }
-+        }
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/ber.rs b/src/slapi_r_plugin/src/ber.rs
-new file mode 100644
-index 000000000..a501fd642
---- /dev/null
-+++ b/src/slapi_r_plugin/src/ber.rs
-@@ -0,0 +1,90 @@
-+use crate::log::{log_error, ErrorLevel};
-+use libc;
-+use std::ffi::CString;
-+// use std::ptr;
-+use std::slice;
-+
-+use std::convert::TryFrom;
-+use uuid::Uuid;
-+
-+use crate::error::PluginError;
-+
-+#[repr(C)]
-+pub(crate) struct ol_berval {
-+    pub len: usize,
-+    pub data: *const u8,
-+}
-+
-+#[derive(Debug)]
-+pub struct BerValRef {
-+    pub(crate) raw_berval: *const ol_berval,
-+}
-+
-+impl BerValRef {
-+    pub fn new(raw_berval: *const libc::c_void) -> Self {
-+        // so we retype this
-+        let raw_berval = raw_berval as *const ol_berval;
-+        BerValRef { raw_berval }
-+    }
-+
-+    pub(crate) fn into_cstring(&self) -> Option<CString> {
-+        // Cstring does not need a trailing null, so if we have one, ignore it.
-+        let l: usize = unsafe { (*self.raw_berval).len };
-+        let d_slice = unsafe { slice::from_raw_parts((*self.raw_berval).data, l) };
-+        CString::new(d_slice)
-+            .or_else(|e| {
-+                // Try it again, but with one byte less to trim a potential trailing null that
-+                // could have been allocated, and ensure it has at least 1 byte of good data
-+                // remaining.
-+                if l > 1 {
-+                    let d_slice = unsafe { slice::from_raw_parts((*self.raw_berval).data, l - 1) };
-+                    CString::new(d_slice)
-+                } else {
-+                    Err(e)
-+                }
-+            })
-+            .map_err(|_| {
-+                log_error!(
-+                    ErrorLevel::Trace,
-+                    "invalid ber parse attempt, may contain a null byte? -> {:?}",
-+                    self
-+                );
-+                ()
-+            })
-+            .ok()
-+    }
-+
-+    pub fn into_string(&self) -> Option<String> {
-+        // Convert a Some to a rust string.
-+        self.into_cstring().and_then(|v| {
-+            v.into_string()
-+                .map_err(|_| {
-+                    log_error!(
-+                        ErrorLevel::Trace,
-+                        "failed to convert cstring to string -> {:?}",
-+                        self
-+                    );
-+                    ()
-+                })
-+                .ok()
-+        })
-+    }
-+}
-+
-+impl TryFrom<&BerValRef> for Uuid {
-+    type Error = PluginError;
-+
-+    fn try_from(value: &BerValRef) -> Result<Self, Self::Error> {
-+        let val_string = value.into_string().ok_or(PluginError::BervalString)?;
-+
-+        Uuid::parse_str(val_string.as_str())
-+            .map(|r| {
-+                log_error!(ErrorLevel::Trace, "valid uuid -> {:?}", r);
-+                r
-+            })
-+            .map_err(|_e| {
-+                log_error!(ErrorLevel::Plugin, "Invalid uuid");
-+                PluginError::InvalidSyntax
-+            })
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
-new file mode 100644
-index 000000000..cf76ccbdb
---- /dev/null
-+++ b/src/slapi_r_plugin/src/constants.rs
-@@ -0,0 +1,203 @@
-+use crate::error::RPluginError;
-+use std::convert::TryFrom;
-+use std::os::raw::c_char;
-+
-+pub const LDAP_SUCCESS: i32 = 0;
-+pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50;
-+
-+#[repr(i32)]
-+/// The set of possible function handles we can register via the pblock. These
-+/// values correspond to slapi-plugin.h.
-+pub enum PluginFnType {
-+    /// SLAPI_PLUGIN_DESTROY_FN
-+    Destroy = 11,
-+    /// SLAPI_PLUGIN_CLOSE_FN
-+    Close = 210,
-+    /// SLAPI_PLUGIN_START_FN
-+    Start = 212,
-+    /// SLAPI_PLUGIN_PRE_BIND_FN
-+    PreBind = 401,
-+    /// SLAPI_PLUGIN_PRE_UNBIND_FN
-+    PreUnbind = 402,
-+    /// SLAPI_PLUGIN_PRE_SEARCH_FN
-+    PreSearch = 403,
-+    /// SLAPI_PLUGIN_PRE_COMPARE_FN
-+    PreCompare = 404,
-+    /// SLAPI_PLUGIN_PRE_MODIFY_FN
-+    PreModify = 405,
-+    /// SLAPI_PLUGIN_PRE_MODRDN_FN
-+    PreModRDN = 406,
-+    /// SLAPI_PLUGIN_PRE_ADD_FN
-+    PreAdd = 407,
-+    /// SLAPI_PLUGIN_PRE_DELETE_FN
-+    PreDelete = 408,
-+    /// SLAPI_PLUGIN_PRE_ABANDON_FN
-+    PreAbandon = 409,
-+    /// SLAPI_PLUGIN_PRE_ENTRY_FN
-+    PreEntry = 410,
-+    /// SLAPI_PLUGIN_PRE_REFERRAL_FN
-+    PreReferal = 411,
-+    /// SLAPI_PLUGIN_PRE_RESULT_FN
-+    PreResult = 412,
-+    /// SLAPI_PLUGIN_PRE_EXTOP_FN
-+    PreExtop = 413,
-+    /// SLAPI_PLUGIN_BE_PRE_ADD_FN
-+    BeTxnPreAdd = 460,
-+    /// SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN
-+    BeTxnPreModify = 461,
-+    /// SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN
-+    BeTxnPreModRDN = 462,
-+    /// SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN
-+    BeTxnPreDelete = 463,
-+    /// SLAPI_PLUGIN_BE_TXN_PRE_DELETE_TOMBSTONE_FN
-+    BeTxnPreDeleteTombstone = 464,
-+    /// SLAPI_PLUGIN_POST_SEARCH_FN
-+    PostSearch = 503,
-+    /// SLAPI_PLUGIN_BE_POST_ADD_FN
-+    BeTxnPostAdd = 560,
-+    /// SLAPI_PLUGIN_BE_POST_MODIFY_FN
-+    BeTxnPostModify = 561,
-+    /// SLAPI_PLUGIN_BE_POST_MODRDN_FN
-+    BeTxnPostModRDN = 562,
-+    /// SLAPI_PLUGIN_BE_POST_DELETE_FN
-+    BeTxnPostDelete = 563,
-+
-+    /// SLAPI_PLUGIN_MR_FILTER_CREATE_FN
-+    MRFilterCreate = 600,
-+    /// SLAPI_PLUGIN_MR_INDEXER_CREATE_FN
-+    MRIndexerCreate = 601,
-+    /// SLAPI_PLUGIN_MR_FILTER_AVA
-+    MRFilterAva = 618,
-+    /// SLAPI_PLUGIN_MR_FILTER_SUB
-+    MRFilterSub = 619,
-+    /// SLAPI_PLUGIN_MR_VALUES2KEYS
-+    MRValuesToKeys = 620,
-+    /// SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA
-+    MRAssertionToKeysAva = 621,
-+    /// SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB
-+    MRAssertionToKeysSub = 622,
-+    /// SLAPI_PLUGIN_MR_COMPARE
-+    MRCompare = 625,
-+    /// SLAPI_PLUGIN_MR_NORMALIZE
-+    MRNormalize = 626,
-+
-+    /// SLAPI_PLUGIN_SYNTAX_FILTER_AVA
-+    SyntaxFilterAva = 700,
-+    /// SLAPI_PLUGIN_SYNTAX_FILTER_SUB
-+    SyntaxFilterSub = 701,
-+    /// SLAPI_PLUGIN_SYNTAX_VALUES2KEYS
-+    SyntaxValuesToKeys = 702,
-+    /// SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_AVA
-+    SyntaxAssertion2KeysAva = 703,
-+    /// SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_SUB
-+    SyntaxAssertion2KeysSub = 704,
-+    /// SLAPI_PLUGIN_SYNTAX_FLAGS
-+    SyntaxFlags = 707,
-+    /// SLAPI_PLUGIN_SYNTAX_COMPARE
-+    SyntaxCompare = 708,
-+    /// SLAPI_PLUGIN_SYNTAX_VALIDATE
-+    SyntaxValidate = 710,
-+    /// SLAPI_PLUGIN_SYNTAX_NORMALIZE
-+    SyntaxNormalize = 711,
-+}
-+
-+static SV01: [u8; 3] = [b'0', b'1', b'\0'];
-+static SV02: [u8; 3] = [b'0', b'2', b'\0'];
-+static SV03: [u8; 3] = [b'0', b'3', b'\0'];
-+
-+/// Corresponding plugin versions
-+pub enum PluginVersion {
-+    /// SLAPI_PLUGIN_VERSION_01
-+    V01,
-+    /// SLAPI_PLUGIN_VERSION_02
-+    V02,
-+    /// SLAPI_PLUGIN_VERSION_03
-+    V03,
-+}
-+
-+impl PluginVersion {
-+    pub fn to_char_ptr(&self) -> *const c_char {
-+        match self {
-+            PluginVersion::V01 => &SV01 as *const _ as *const c_char,
-+            PluginVersion::V02 => &SV02 as *const _ as *const c_char,
-+            PluginVersion::V03 => &SV03 as *const _ as *const c_char,
-+        }
-+    }
-+}
-+
-+static SMATCHINGRULE: [u8; 13] = [
-+    b'm', b'a', b't', b'c', b'h', b'i', b'n', b'g', b'r', b'u', b'l', b'e', b'\0',
-+];
-+
-+pub enum PluginType {
-+    MatchingRule,
-+}
-+
-+impl PluginType {
-+    pub fn to_char_ptr(&self) -> *const c_char {
-+        match self {
-+            PluginType::MatchingRule => &SMATCHINGRULE as *const _ as *const c_char,
-+        }
-+    }
-+}
-+
-+#[repr(i32)]
-+/// data types that we can get or retrieve from the pblock. This is only
-+/// used internally.
-+pub(crate) enum PblockType {
-+    /// SLAPI_PLUGIN_PRIVATE
-+    _PrivateData = 4,
-+    /// SLAPI_PLUGIN_VERSION
-+    Version = 8,
-+    /// SLAPI_PLUGIN_DESCRIPTION
-+    _Description = 12,
-+    /// SLAPI_PLUGIN_IDENTITY
-+    Identity = 13,
-+    /// SLAPI_PLUGIN_INTOP_RESULT
-+    OpResult = 15,
-+    /// SLAPI_ADD_ENTRY
-+    AddEntry = 60,
-+    /// SLAPI_BACKEND
-+    Backend = 130,
-+    /// SLAPI_PLUGIN_MR_NAMES
-+    MRNames = 624,
-+    /// SLAPI_PLUGIN_SYNTAX_NAMES
-+    SyntaxNames = 705,
-+    /// SLAPI_PLUGIN_SYNTAX_OID
-+    SyntaxOid = 706,
-+}
-+
-+/// See ./ldap/include/ldaprot.h
-+#[derive(PartialEq)]
-+pub enum FilterType {
-+    And = 0xa0,
-+    Or = 0xa1,
-+    Not = 0xa2,
-+    Equality = 0xa3,
-+    Substring = 0xa4,
-+    Ge = 0xa5,
-+    Le = 0xa6,
-+    Present = 0x87,
-+    Approx = 0xa8,
-+    Extended = 0xa9,
-+}
-+
-+impl TryFrom<i32> for FilterType {
-+    type Error = RPluginError;
-+
-+    fn try_from(value: i32) -> Result<Self, Self::Error> {
-+        match value {
-+            0xa0 => Ok(FilterType::And),
-+            0xa1 => Ok(FilterType::Or),
-+            0xa2 => Ok(FilterType::Not),
-+            0xa3 => Ok(FilterType::Equality),
-+            0xa4 => Ok(FilterType::Substring),
-+            0xa5 => Ok(FilterType::Ge),
-+            0xa6 => Ok(FilterType::Le),
-+            0x87 => Ok(FilterType::Present),
-+            0xa8 => Ok(FilterType::Approx),
-+            0xa9 => Ok(FilterType::Extended),
-+            _ => Err(RPluginError::FilterType),
-+        }
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/dn.rs b/src/slapi_r_plugin/src/dn.rs
-new file mode 100644
-index 000000000..5f8a65743
---- /dev/null
-+++ b/src/slapi_r_plugin/src/dn.rs
-@@ -0,0 +1,108 @@
-+use std::convert::TryFrom;
-+use std::ffi::{CStr, CString};
-+use std::ops::Deref;
-+use std::os::raw::c_char;
-+
-+extern "C" {
-+    fn slapi_sdn_get_dn(sdn: *const libc::c_void) -> *const c_char;
-+    fn slapi_sdn_new_dn_byval(dn: *const c_char) -> *const libc::c_void;
-+    fn slapi_sdn_issuffix(sdn: *const libc::c_void, suffix_sdn: *const libc::c_void) -> i32;
-+    fn slapi_sdn_free(sdn: *const *const libc::c_void);
-+    fn slapi_sdn_dup(sdn: *const libc::c_void) -> *const libc::c_void;
-+}
-+
-+#[derive(Debug)]
-+pub struct SdnRef {
-+    raw_sdn: *const libc::c_void,
-+}
-+
-+#[derive(Debug)]
-+pub struct NdnRef {
-+    raw_ndn: *const c_char,
-+}
-+
-+#[derive(Debug)]
-+pub struct Sdn {
-+    value: SdnRef,
-+}
-+
-+unsafe impl Send for Sdn {}
-+
-+impl From<&CStr> for Sdn {
-+    fn from(value: &CStr) -> Self {
-+        Sdn {
-+            value: SdnRef {
-+                raw_sdn: unsafe { slapi_sdn_new_dn_byval(value.as_ptr()) },
-+            },
-+        }
-+    }
-+}
-+
-+impl TryFrom<&str> for Sdn {
-+    type Error = ();
-+
-+    fn try_from(value: &str) -> Result<Self, Self::Error> {
-+        let cstr = CString::new(value).map_err(|_| ())?;
-+        Ok(Self::from(cstr.as_c_str()))
-+    }
-+}
-+
-+impl Clone for Sdn {
-+    fn clone(&self) -> Self {
-+        let raw_sdn = unsafe { slapi_sdn_dup(self.value.raw_sdn) };
-+        Sdn {
-+            value: SdnRef { raw_sdn },
-+        }
-+    }
-+}
-+
-+impl Drop for Sdn {
-+    fn drop(&mut self) {
-+        unsafe { slapi_sdn_free(&self.value.raw_sdn as *const *const libc::c_void) }
-+    }
-+}
-+
-+impl Deref for Sdn {
-+    type Target = SdnRef;
-+
-+    fn deref(&self) -> &Self::Target {
-+        &self.value
-+    }
-+}
-+
-+impl SdnRef {
-+    pub fn new(raw_sdn: *const libc::c_void) -> Self {
-+        SdnRef { raw_sdn }
-+    }
-+
-+    /// This is unsafe, as you need to ensure that the SdnRef associated lives at
-+    /// least as long as the NdnRef, else this may cause a use-after-free.
-+    pub unsafe fn as_ndnref(&self) -> NdnRef {
-+        let raw_ndn = slapi_sdn_get_dn(self.raw_sdn);
-+        NdnRef { raw_ndn }
-+    }
-+
-+    pub fn to_dn_string(&self) -> String {
-+        let dn_raw = unsafe { slapi_sdn_get_dn(self.raw_sdn) };
-+        let dn_cstr = unsafe { CStr::from_ptr(dn_raw) };
-+        dn_cstr.to_string_lossy().to_string()
-+    }
-+
-+    pub(crate) fn as_ptr(&self) -> *const libc::c_void {
-+        self.raw_sdn
-+    }
-+
-+    pub fn is_below_suffix(&self, other: &SdnRef) -> bool {
-+        if unsafe { slapi_sdn_issuffix(self.raw_sdn, other.raw_sdn) } == 0 {
-+            false
-+        } else {
-+            true
-+        }
-+    }
-+}
-+
-+impl NdnRef {
-+    pub(crate) fn as_ptr(&self) -> *const c_char {
-+        self.raw_ndn
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs
-new file mode 100644
-index 000000000..034efe692
---- /dev/null
-+++ b/src/slapi_r_plugin/src/entry.rs
-@@ -0,0 +1,92 @@
-+use crate::dn::SdnRef;
-+use crate::value::{slapi_value, ValueArrayRef, ValueRef};
-+use std::ffi::CString;
-+use std::os::raw::c_char;
-+
-+extern "C" {
-+    fn slapi_entry_get_sdn(e: *const libc::c_void) -> *const libc::c_void;
-+    fn slapi_entry_add_value(
-+        e: *const libc::c_void,
-+        a: *const c_char,
-+        v: *const slapi_value,
-+    ) -> i32;
-+    fn slapi_entry_attr_get_valuearray(
-+        e: *const libc::c_void,
-+        a: *const c_char,
-+    ) -> *const *const slapi_value;
-+}
-+
-+pub struct EntryRef {
-+    raw_e: *const libc::c_void,
-+}
-+
-+/*
-+pub struct Entry {
-+    value: EntryRef,
-+}
-+
-+impl Drop for Entry {
-+    fn drop(&mut self) {
-+        ()
-+    }
-+}
-+
-+impl Deref for Entry {
-+    type Target = EntryRef;
-+
-+    fn deref(&self) -> &Self::Target {
-+        &self.value
-+    }
-+}
-+
-+impl Entry {
-+    // Forget about this value, and get a pointer back suitable for providing to directory
-+    // server to take ownership.
-+    pub unsafe fn forget(self) -> *mut libc::c_void {
-+        unimplemented!();
-+    }
-+}
-+*/
-+
-+impl EntryRef {
-+    pub fn new(raw_e: *const libc::c_void) -> Self {
-+        EntryRef { raw_e }
-+    }
-+
-+    // get the sdn
-+    pub fn get_sdnref(&self) -> SdnRef {
-+        let sdn_ptr = unsafe { slapi_entry_get_sdn(self.raw_e) };
-+        SdnRef::new(sdn_ptr)
-+    }
-+
-+    pub fn get_attr(&self, name: &str) -> Option<ValueArrayRef> {
-+        let cname = CString::new(name).expect("invalid attr name");
-+        let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) };
-+
-+        if va.is_null() {
-+            None
-+        } else {
-+            Some(ValueArrayRef::new(va as *const libc::c_void))
-+        }
-+    }
-+
-+    pub fn add_value(&mut self, a: &str, v: &ValueRef) {
-+        // turn the attr to a c string.
-+        // TODO FIX
-+        let attr_name = CString::new(a).expect("Invalid attribute name");
-+        // Get the raw ptr.
-+        let raw_value_ref = unsafe { v.as_ptr() };
-+        // We ignore the return because it always returns 0.
-+        let _ = unsafe {
-+            // By default, this clones.
-+            slapi_entry_add_value(self.raw_e, attr_name.as_ptr(), raw_value_ref)
-+        };
-+    }
-+
-+    /*
-+    pub fn replace_value(&mut self, a: &str, v: &ValueRef) {
-+        // slapi_entry_attr_replace(e, SLAPI_ATTR_ENTRYUSN, new_bvals);
-+        unimplemented!();
-+    }
-+    */
-+}
-diff --git a/src/slapi_r_plugin/src/error.rs b/src/slapi_r_plugin/src/error.rs
-new file mode 100644
-index 000000000..91c81cd26
---- /dev/null
-+++ b/src/slapi_r_plugin/src/error.rs
-@@ -0,0 +1,61 @@
-+// use std::convert::TryFrom;
-+
-+#[derive(Debug)]
-+#[repr(i32)]
-+pub enum RPluginError {
-+    Unknown = 500,
-+    Unimplemented = 501,
-+    FilterType = 502,
-+}
-+
-+#[derive(Debug)]
-+#[repr(i32)]
-+pub enum PluginError {
-+    GenericFailure = -1,
-+    Unknown = 1000,
-+    Unimplemented = 1001,
-+    Pblock = 1002,
-+    BervalString = 1003,
-+    InvalidSyntax = 1004,
-+    InvalidFilter = 1005,
-+    TxnFailure = 1006,
-+}
-+
-+#[derive(Debug)]
-+#[repr(i32)]
-+pub enum LDAPError {
-+    Success = 0,
-+    Operation = 1,
-+    ObjectClassViolation = 65,
-+    Other = 80,
-+    Unknown = 999,
-+}
-+
-+impl From<i32> for LDAPError {
-+    fn from(value: i32) -> Self {
-+        match value {
-+            0 => LDAPError::Success,
-+            1 => LDAPError::Operation,
-+            65 => LDAPError::ObjectClassViolation,
-+            80 => LDAPError::Other,
-+            _ => LDAPError::Unknown,
-+        }
-+    }
-+}
-+
-+// if we make debug impl, we can use this.
-+// errmsg = ldap_err2string(result);
-+
-+#[derive(Debug)]
-+#[repr(i32)]
-+pub enum DseCallbackStatus {
-+    DoNotApply = 0,
-+    Ok = 1,
-+    Error = -1,
-+}
-+
-+#[derive(Debug)]
-+pub enum LoggingError {
-+    Unknown,
-+    CString(String),
-+}
-diff --git a/src/slapi_r_plugin/src/init.c b/src/slapi_r_plugin/src/init.c
-new file mode 100644
-index 000000000..86d1235b8
---- /dev/null
-+++ b/src/slapi_r_plugin/src/init.c
-@@ -0,0 +1,8 @@
-+
-+#include <inttypes.h>
-+
-+int32_t
-+do_nothing_really_well_abcdef() {
-+    return 0;
-+}
-+
-diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
-new file mode 100644
-index 000000000..d7fc22e52
---- /dev/null
-+++ b/src/slapi_r_plugin/src/lib.rs
-@@ -0,0 +1,36 @@
-+// extern crate lazy_static;
-+
-+#[macro_use]
-+pub mod macros;
-+pub mod backend;
-+pub mod ber;
-+mod constants;
-+pub mod dn;
-+pub mod entry;
-+pub mod error;
-+pub mod log;
-+pub mod pblock;
-+pub mod plugin;
-+pub mod search;
-+pub mod syntax_plugin;
-+pub mod task;
-+pub mod value;
-+
-+pub mod prelude {
-+    pub use crate::backend::{BackendRef, BackendRefTxn};
-+    pub use crate::ber::BerValRef;
-+    pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS};
-+    pub use crate::dn::{Sdn, SdnRef};
-+    pub use crate::entry::EntryRef;
-+    pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
-+    pub use crate::log::{log_error, ErrorLevel};
-+    pub use crate::pblock::{Pblock, PblockRef};
-+    pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
-+    pub use crate::search::{Search, SearchScope};
-+    pub use crate::syntax_plugin::{
-+        matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr,
-+        SlapiSubMr, SlapiSyntaxPlugin1,
-+    };
-+    pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef};
-+    pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef};
-+}
-diff --git a/src/slapi_r_plugin/src/log.rs b/src/slapi_r_plugin/src/log.rs
-new file mode 100644
-index 000000000..f686ecd1a
---- /dev/null
-+++ b/src/slapi_r_plugin/src/log.rs
-@@ -0,0 +1,87 @@
-+use std::ffi::CString;
-+use std::os::raw::c_char;
-+
-+use crate::constants;
-+use crate::error::LoggingError;
-+
-+extern "C" {
-+    fn slapi_log_error(level: i32, system: *const c_char, message: *const c_char) -> i32;
-+}
-+
-+pub fn log_error(
-+    level: ErrorLevel,
-+    subsystem: String,
-+    message: String,
-+) -> Result<(), LoggingError> {
-+    let c_subsystem = CString::new(subsystem)
-+        .map_err(|e| LoggingError::CString(format!("failed to convert subsystem -> {:?}", e)))?;
-+    let c_message = CString::new(message)
-+        .map_err(|e| LoggingError::CString(format!("failed to convert message -> {:?}", e)))?;
-+
-+    match unsafe { slapi_log_error(level as i32, c_subsystem.as_ptr(), c_message.as_ptr()) } {
-+        constants::LDAP_SUCCESS => Ok(()),
-+        _ => Err(LoggingError::Unknown),
-+    }
-+}
-+
-+#[repr(i32)]
-+#[derive(Debug)]
-+/// This is a safe rust representation of the values from slapi-plugin.h
-+/// such as SLAPI_LOG_FATAL, SLAPI_LOG_TRACE, SLAPI_LOG_ ... These vaulues
-+/// must matche their counter parts in slapi-plugin.h
-+pub enum ErrorLevel {
-+    /// Always log messages at this level. Soon to go away, see EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, DEBUG
-+    Fatal = 0,
-+    /// Log detailed messages.
-+    Trace = 1,
-+    /// Log packet tracing.
-+    Packets = 2,
-+    /// Log argument tracing.
-+    Args = 3,
-+    /// Log connection tracking.
-+    Conns = 4,
-+    /// Log BER parsing.
-+    Ber = 5,
-+    /// Log filter processing.
-+    Filter = 6,
-+    /// Log configuration processing.
-+    Config = 7,
-+    /// Log access controls
-+    Acl = 8,
-+    /// Log .... ???
-+    Shell = 9,
-+    /// Log .... ???
-+    Parse = 10,
-+    /// Log .... ???
-+    House = 11,
-+    /// Log detailed replication information.
-+    Repl = 12,
-+    /// Log cache management.
-+    Cache = 13,
-+    /// Log detailed plugin operations.
-+    Plugin = 14,
-+    /// Log .... ???
-+    Timing = 15,
-+    /// Log backend infomation.
-+    BackLDBM = 16,
-+    /// Log ACL processing.
-+    AclSummary = 17,
-+    /// Log nuncstans processing.
-+    NuncStansDONOTUSE = 18,
-+    /// Emergency messages. Server is bursting into flame.
-+    Emerg = 19,
-+    /// Important alerts, server may explode soon.
-+    Alert = 20,
-+    /// Critical messages, but the server isn't going to explode. Admin should intervene.
-+    Crit = 21,
-+    /// Error has occured, but we can keep going. Could indicate misconfiguration.
-+    Error = 22,
-+    /// Warning about an issue that isn't very important. Good to resolve though.
-+    Warning = 23,
-+    /// Inform the admin of something that they should know about, IE server is running now.
-+    Notice = 24,
-+    /// Informational messages that are nice to know.
-+    Info = 25,
-+    /// Debugging information from the server.
-+    Debug = 26,
-+}
-diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
-new file mode 100644
-index 000000000..030449632
---- /dev/null
-+++ b/src/slapi_r_plugin/src/macros.rs
-@@ -0,0 +1,835 @@
-+#[macro_export]
-+macro_rules! log_error {
-+    ($level:expr, $($arg:tt)*) => ({
-+        use std::fmt;
-+        match log_error(
-+            $level,
-+            format!("{}:{}", file!(), line!()),
-+            format!("{}\n", fmt::format(format_args!($($arg)*)))
-+        ) {
-+            Ok(_) => {},
-+            Err(e) => {
-+                eprintln!("A logging error occured {}, {} -> {:?}", file!(), line!(), e);
-+            }
-+        };
-+    })
-+}
-+
-+#[macro_export]
-+macro_rules! slapi_r_plugin_hooks {
-+    ($mod_ident:ident, $hooks_ident:ident) => (
-+        paste::item! {
-+            use libc;
-+
-+            static mut PLUGINID: *const libc::c_void = std::ptr::null();
-+
-+            pub(crate) fn plugin_id() -> PluginIdRef {
-+                PluginIdRef {
-+                    raw_pid: unsafe { PLUGINID }
-+                }
-+            }
-+
-+            #[no_mangle]
-+            pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+                log_error!(ErrorLevel::Trace, "it's alive!\n");
-+
-+                match pb.set_plugin_version(PluginVersion::V03) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                // Setup the plugin id.
-+                unsafe {
-+                    PLUGINID = pb.get_plugin_identity();
-+                }
-+
-+                if $hooks_ident::has_betxn_pre_modify() {
-+                    match pb.register_betxn_pre_modify_fn([<$mod_ident _plugin_betxn_pre_modify>]) {
-+                        0 => {},
-+                        e => return e,
-+                    };
-+                }
-+
-+                if $hooks_ident::has_betxn_pre_add() {
-+                    match pb.register_betxn_pre_add_fn([<$mod_ident _plugin_betxn_pre_add>]) {
-+                        0 => {},
-+                        e => return e,
-+                    };
-+                }
-+
-+                // set the start fn
-+                match pb.register_start_fn([<$mod_ident _plugin_start>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                // set the close fn
-+                match pb.register_close_fn([<$mod_ident _plugin_close>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_start>](raw_pb: *const libc::c_void) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+
-+                if let Some(task_ident) = $hooks_ident::has_task_handler() {
-+                    match task_register_handler_fn(task_ident, [<$mod_ident _plugin_task_handler>], &mut pb) {
-+                        0 => {},
-+                        e => return e,
-+                    };
-+                };
-+
-+                match $hooks_ident::start(&mut pb) {
-+                    Ok(()) => {
-+                        0
-+                    }
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Error, "-> {:?}", e);
-+                        1
-+                    }
-+                }
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_close>](raw_pb: *const libc::c_void) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+
-+                if let Some(task_ident) = $hooks_ident::has_task_handler() {
-+                    match task_unregister_handler_fn(task_ident, [<$mod_ident _plugin_task_handler>]) {
-+                        0 => {},
-+                        e => return e,
-+                    };
-+                };
-+
-+                match $hooks_ident::close(&mut pb) {
-+                    Ok(()) => {
-+                        0
-+                    }
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Error, "-> {:?}", e);
-+                        1
-+                    }
-+                }
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_betxn_pre_modify>](raw_pb: *const libc::c_void) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+                match $hooks_ident::betxn_pre_modify(&mut pb) {
-+                    Ok(()) => {
-+                        0
-+                    }
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Error, "-> {:?}", e);
-+                        1
-+                    }
-+                }
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_betxn_pre_add>](raw_pb: *const libc::c_void) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+                match $hooks_ident::betxn_pre_add(&mut pb) {
-+                    Ok(()) => {
-+                        0
-+                    }
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Error, "-> {:?}", e);
-+                        1
-+                    }
-+                }
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_task_handler>](
-+                raw_pb: *const libc::c_void,
-+                raw_e_before: *const libc::c_void,
-+                _raw_e_after: *const libc::c_void,
-+                raw_returncode: *mut i32,
-+                _raw_returntext: *mut c_char,
-+                raw_arg: *const libc::c_void,
-+            ) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+
-+                let e_before = EntryRef::new(raw_e_before);
-+                // let e_after = EntryRef::new(raw_e_after);
-+
-+                let task_data = match $hooks_ident::task_validate(
-+                    &e_before
-+                ) {
-+                    Ok(data) => data,
-+                    Err(retcode) => {
-+                        unsafe { *raw_returncode = retcode as i32 };
-+                        return DseCallbackStatus::Error as i32
-+                    }
-+                };
-+
-+                let mut task = Task::new(&e_before, raw_arg);
-+                task.register_destructor_fn([<$mod_ident _plugin_task_destructor>]);
-+
-+                // Setup the task thread and then run it. Remember, because Rust is
-+                // smarter about memory, the move statement here moves the task wrapper and
-+                // task_data to the thread, so they drop on thread close. No need for a
-+                // destructor beyond blocking on the thread to complete.
-+                std::thread::spawn(move || {
-+                    log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_task_thread => begin"));
-+                    // Indicate the task is begun
-+                    task.begin();
-+                    // Start a txn
-+                    let be: Option<BackendRef> = match $hooks_ident::task_be_dn_hint(&task_data)
-+                        .map(|be_dn| {
-+                            BackendRef::new(&be_dn)
-+                        })
-+                        .transpose() {
-+                            Ok(v) => v,
-+                            Err(_) => {
-+                                log_error!(ErrorLevel::Error, concat!(stringify!($mod_ident), "_plugin_task_thread => task error -> selected dn does not exist"));
-+                                task.error(PluginError::TxnFailure as i32);
-+                                return;
-+                            }
-+                        };
-+                    let be_txn: Option<BackendRefTxn> = match be {
-+                        Some(b) => {
-+                            match b.begin_txn() {
-+                                Ok(txn) => Some(txn),
-+                                Err(_) => {
-+                                    log_error!(ErrorLevel::Error, concat!(stringify!($mod_ident), "_plugin_task_thread => task error -> unable to begin txn"));
-+                                    task.error(PluginError::TxnFailure as i32);
-+                                    return;
-+                                }
-+                            }
-+                        }
-+                        None => None,
-+                    };
-+
-+                    // Abort or commit the txn here.
-+                    match $hooks_ident::task_handler(&mut task, task_data) {
-+                        Ok(_data) => {
-+                            match be_txn {
-+                                Some(be_txn) => be_txn.commit(),
-+                                None => {}
-+                            };
-+                            // These will set the status, and guarantee the drop
-+                            task.success();
-+                        }
-+                        Err(e) => {
-+                            log_error!(ErrorLevel::Error, "{}_plugin_task_thread => task error -> {:?}", stringify!($mod_ident), e);
-+                            // These will set the status, and guarantee the drop
-+                            task.error(e as i32);
-+                            // On drop, be_txn implicitly aborts.
-+                        }
-+                    };
-+
-+                    log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_task_thread <= complete"));
-+                });
-+
-+                // Indicate that the thread started just fine.
-+                unsafe { *raw_returncode = LDAP_SUCCESS };
-+                DseCallbackStatus::Ok as i32
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_task_destructor>](
-+                raw_task: *const libc::c_void,
-+            ) {
-+                // Simply block until the task refcount drops to 0.
-+                let task = TaskRef::new(raw_task);
-+                task.block();
-+            }
-+
-+        } // end paste
-+    )
-+} // end macro
-+
-+#[macro_export]
-+macro_rules! slapi_r_syntax_plugin_hooks {
-+    (
-+        $mod_ident:ident,
-+        $hooks_ident:ident
-+    ) => (
-+        paste::item! {
-+            use libc;
-+            use std::convert::TryFrom;
-+
-+            #[no_mangle]
-+            pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+                log_error!(ErrorLevel::Trace, "slapi_r_syntax_plugin_hooks => begin");
-+                // Setup our plugin
-+                match pb.set_plugin_version(PluginVersion::V01) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                // Setup the names/oids that this plugin provides syntaxes for.
-+
-+                let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) };
-+                match pb.register_syntax_names(name_ptr) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) };
-+                match pb.register_syntax_oid(name_ptr) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                match pb.register_syntax_validate_fn([<$mod_ident _plugin_syntax_validate>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                // Now setup the MR's
-+                match register_plugin_ext(
-+                    PluginType::MatchingRule,
-+                    $hooks_ident::eq_mr_name(),
-+                    concat!(stringify!($mod_ident), "_plugin_eq_mr_init"),
-+                    [<$mod_ident _plugin_eq_mr_init>]
-+                ) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                if $hooks_ident::sub_mr_oid().is_some() {
-+                    match register_plugin_ext(
-+                        PluginType::MatchingRule,
-+                        $hooks_ident::sub_mr_name(),
-+                        concat!(stringify!($mod_ident), "_plugin_ord_mr_init"),
-+                        [<$mod_ident _plugin_ord_mr_init>]
-+                    ) {
-+                        0 => {},
-+                        e => return e,
-+                    };
-+                }
-+
-+                if $hooks_ident::ord_mr_oid().is_some() {
-+                    match register_plugin_ext(
-+                        PluginType::MatchingRule,
-+                        $hooks_ident::ord_mr_name(),
-+                        concat!(stringify!($mod_ident), "_plugin_ord_mr_init"),
-+                        [<$mod_ident _plugin_ord_mr_init>]
-+                    ) {
-+                        0 => {},
-+                        e => return e,
-+                    };
-+                }
-+
-+                log_error!(ErrorLevel::Trace, "slapi_r_syntax_plugin_hooks <= success");
-+
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_syntax_validate>](
-+                raw_berval: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_syntax_validate => begin"));
-+
-+                let bval = BerValRef::new(raw_berval);
-+
-+                match $hooks_ident::syntax_validate(&bval) {
-+                    Ok(()) => {
-+                        log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_syntax_validate <= success"));
-+                        LDAP_SUCCESS
-+                    }
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Warning,
-+                            "{}_plugin_syntax_validate error -> {:?}", stringify!($mod_ident), e
-+                        );
-+                        e as i32
-+                    }
-+                }
-+            }
-+
-+            // All the MR types share this.
-+            pub extern "C" fn [<$mod_ident _plugin_mr_filter_ava>](
-+                raw_pb: *const libc::c_void,
-+                raw_bvfilter: *const libc::c_void,
-+                raw_bvals: *const libc::c_void,
-+                i_ftype: i32,
-+                _retval: *mut libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_mr_filter_ava => begin"));
-+                let mut pb = PblockRef::new(raw_pb);
-+                let bvfilter = BerValRef::new(raw_bvfilter);
-+                let bvals = ValueArrayRef::new(raw_bvals);
-+                let ftype = match FilterType::try_from(i_ftype) {
-+                    Ok(f) => f,
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Error, "{}_plugin_ord_mr_filter_ava Error -> {:?}",
-+                        stringify!($mod_ident), e);
-+                        return e as i32
-+                    }
-+                };
-+
-+                let r: Result<bool, PluginError> = match ftype {
-+                    FilterType::And | FilterType::Or | FilterType::Not => {
-+                        Err(PluginError::InvalidFilter)
-+                    }
-+                    FilterType::Equality => {
-+                        $hooks_ident::filter_ava_eq(&mut pb, &bvfilter, &bvals)
-+                    }
-+                    FilterType::Substring => {
-+                        Err(PluginError::Unimplemented)
-+                    }
-+                    FilterType::Ge => {
-+                        $hooks_ident::filter_ava_ord(&mut pb, &bvfilter, &bvals)
-+                            .map(|o_ord| {
-+                                match o_ord {
-+                                    Some(Ordering::Greater) | Some(Ordering::Equal) => true,
-+                                    Some(Ordering::Less) | None => false,
-+                                }
-+                            })
-+                    }
-+                    FilterType::Le => {
-+                        $hooks_ident::filter_ava_ord(&mut pb, &bvfilter, &bvals)
-+                            .map(|o_ord| {
-+                                match o_ord {
-+                                    Some(Ordering::Less) | Some(Ordering::Equal) => true,
-+                                    Some(Ordering::Greater) | None => false,
-+                                }
-+                            })
-+                    }
-+                    FilterType::Present => {
-+                        Err(PluginError::Unimplemented)
-+                    }
-+                    FilterType::Approx => {
-+                        Err(PluginError::Unimplemented)
-+                    }
-+                    FilterType::Extended => {
-+                        Err(PluginError::Unimplemented)
-+                    }
-+                };
-+
-+                match r {
-+                    Ok(b) => {
-+                        log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_mr_filter_ava <= success"));
-+                        // rust bool into i32 will become 0 false, 1 true. However, ds expects 0 true and 1 false for
-+                        // for the filter_ava match. So we flip the bool, and send it back.
-+                        (!b) as i32
-+                    }
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Warning,
-+                            "{}_plugin_mr_filter_ava error -> {:?}",
-+                            stringify!($mod_ident), e
-+                        );
-+                        e as i32
-+                    }
-+                }
-+            }
-+
-+
-+            // EQ MR plugin hooks
-+            #[no_mangle]
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_init>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_init => begin"));
-+                match pb.set_plugin_version(PluginVersion::V01) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) };
-+                // SLAPI_PLUGIN_MR_NAMES
-+                match pb.register_mr_names(name_ptr) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                // description
-+                // SLAPI_PLUGIN_MR_FILTER_CREATE_FN
-+                match pb.register_mr_filter_create_fn([<$mod_ident _plugin_eq_mr_filter_create>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_INDEXER_CREATE_FN
-+                match pb.register_mr_indexer_create_fn([<$mod_ident _plugin_eq_mr_indexer_create>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_FILTER_AVA
-+                match pb.register_mr_filter_ava_fn([<$mod_ident _plugin_mr_filter_ava>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_FILTER_SUB
-+                match pb.register_mr_filter_sub_fn([<$mod_ident _plugin_eq_mr_filter_sub>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_VALUES2KEYS
-+                match pb.register_mr_values2keys_fn([<$mod_ident _plugin_eq_mr_filter_values2keys>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA
-+                match pb.register_mr_assertion2keys_ava_fn([<$mod_ident _plugin_eq_mr_filter_assertion2keys_ava>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB
-+                match pb.register_mr_assertion2keys_sub_fn([<$mod_ident _plugin_eq_mr_filter_assertion2keys_sub>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_COMPARE
-+                match pb.register_mr_compare_fn([<$mod_ident _plugin_eq_mr_filter_compare>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_NORMALIZE
-+
-+                // Finaly, register the MR
-+                match unsafe { matchingrule_register($hooks_ident::eq_mr_oid(), $hooks_ident::eq_mr_name(), $hooks_ident::eq_mr_desc(), $hooks_ident::attr_oid(), &$hooks_ident::attr_compat_oids()) } {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_init <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_create>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_create => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_create <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_indexer_create>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_indexer_create => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_indexer_create <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_sub>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_sub => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_sub <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_values2keys>](
-+                raw_pb: *const libc::c_void,
-+                raw_vals: *const libc::c_void,
-+                raw_ivals: *mut libc::c_void,
-+                i_ftype: i32,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_values2keys => begin"));
-+                let mut pb = PblockRef::new(raw_pb);
-+                let vals = ValueArrayRef::new(raw_vals);
-+                let ftype = match FilterType::try_from(i_ftype) {
-+                    Ok(f) => f,
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Error,
-+                        "{}_plugin_eq_mr_filter_values2keys Error -> {:?}",
-+                        stringify!($mod_ident),
-+                        e);
-+                        return e as i32
-+                    }
-+                };
-+
-+                if (ftype != FilterType::Equality && ftype != FilterType::Approx) {
-+                    log_error!(ErrorLevel::Error,
-+                        "{}_plugin_eq_mr_filter_values2keys Error -> Invalid Filter type",
-+                        stringify!($mod_ident),
-+                        );
-+                    return PluginError::InvalidFilter  as i32
-+                }
-+
-+                let va = match $hooks_ident::eq_mr_filter_values2keys(&mut pb, &vals) {
-+                    Ok(va) => va,
-+                    Err(e) => {
-+                        log_error!(ErrorLevel::Error,
-+                        "{}_plugin_eq_mr_filter_values2keys Error -> {:?}",
-+                        stringify!($mod_ident),
-+                        e);
-+                        return e as i32
-+                    }
-+                };
-+
-+                // Now, deconstruct the va, get the pointer, and put it into the ivals.
-+                unsafe {
-+                    let ivals_ptr: *mut *const libc::c_void = raw_ivals as *mut _;
-+                    (*ivals_ptr) = va.take_ownership() as *const libc::c_void;
-+                }
-+
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_values2keys <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_assertion2keys_ava>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_ava => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_ava <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_assertion2keys_sub>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_sub => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_assertion2keys_sub <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_names>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                // This is probably another char pointer.
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_eq_mr_filter_compare>](
-+                raw_va: *const libc::c_void,
-+                raw_vb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_compare => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_eq_mr_filter_compare <= success"));
-+                0
-+            }
-+
-+            // SUB MR plugin hooks
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_create>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_create => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_create <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_indexer_create>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_indexer_create => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_indexer_create <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_sub>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_sub => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_sub <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_values2keys>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_values2keys => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_values2keys <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_assertion2keys_ava>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_ava => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_ava <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_assertion2keys_sub>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_sub => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_assertion2keys_sub <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_names>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                // Probably a char array
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_sub_mr_filter_compare>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_compare => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_sub_mr_filter_compare <= success"));
-+                0
-+            }
-+
-+            // ORD MR plugin hooks
-+            #[no_mangle]
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_init>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                let mut pb = PblockRef::new(raw_pb);
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_init => begin"));
-+                match pb.set_plugin_version(PluginVersion::V01) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) };
-+                // SLAPI_PLUGIN_MR_NAMES
-+                match pb.register_mr_names(name_ptr) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                // description
-+                // SLAPI_PLUGIN_MR_FILTER_CREATE_FN
-+                match pb.register_mr_filter_create_fn([<$mod_ident _plugin_ord_mr_filter_create>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_INDEXER_CREATE_FN
-+                match pb.register_mr_indexer_create_fn([<$mod_ident _plugin_ord_mr_indexer_create>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_FILTER_AVA
-+                match pb.register_mr_filter_ava_fn([<$mod_ident _plugin_mr_filter_ava>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_FILTER_SUB
-+                match pb.register_mr_filter_sub_fn([<$mod_ident _plugin_ord_mr_filter_sub>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_VALUES2KEYS
-+                /*
-+                match pb.register_mr_values2keys_fn([<$mod_ident _plugin_ord_mr_filter_values2keys>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                */
-+                // SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA
-+                match pb.register_mr_assertion2keys_ava_fn([<$mod_ident _plugin_ord_mr_filter_assertion2keys_ava>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB
-+                match pb.register_mr_assertion2keys_sub_fn([<$mod_ident _plugin_ord_mr_filter_assertion2keys_sub>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_COMPARE
-+                match pb.register_mr_compare_fn([<$mod_ident _plugin_ord_mr_filter_compare>]) {
-+                    0 => {},
-+                    e => return e,
-+                };
-+                // SLAPI_PLUGIN_MR_NORMALIZE
-+
-+                // Finaly, register the MR
-+                match unsafe { matchingrule_register($hooks_ident::ord_mr_oid().unwrap(), $hooks_ident::ord_mr_name(), $hooks_ident::ord_mr_desc(), $hooks_ident::attr_oid(), &$hooks_ident::attr_compat_oids()) } {
-+                    0 => {},
-+                    e => return e,
-+                };
-+
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_init <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_create>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_create => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_create <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_indexer_create>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_indexer_create => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_indexer_create <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_sub>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_sub => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_sub <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_values2keys>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_values2keys => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_values2keys <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_assertion2keys_ava>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_ava => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_ava <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_assertion2keys_sub>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_sub => begin"));
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_assertion2keys_sub <= success"));
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_names>](
-+                raw_pb: *const libc::c_void,
-+            ) -> i32 {
-+                // probably char pointers
-+                0
-+            }
-+
-+            pub extern "C" fn [<$mod_ident _plugin_ord_mr_filter_compare>](
-+                raw_va: *const libc::c_void,
-+                raw_vb: *const libc::c_void,
-+            ) -> i32 {
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_compare => begin"));
-+                let va = BerValRef::new(raw_va);
-+                let vb = BerValRef::new(raw_vb);
-+                let rc = match $hooks_ident::filter_compare(&va, &vb) {
-+                    Ordering::Less => -1,
-+                    Ordering::Equal => 0,
-+                    Ordering::Greater => 1,
-+                };
-+                log_error!(ErrorLevel::Trace, concat!(stringify!($mod_ident), "_plugin_ord_mr_filter_compare <= success"));
-+                rc
-+            }
-+
-+        } // end paste
-+    )
-+} // end macro
-+
-+#[macro_export]
-+macro_rules! slapi_r_search_callback_mapfn {
-+    (
-+        $mod_ident:ident,
-+        $cb_target_ident:ident,
-+        $cb_mod_ident:ident
-+    ) => {
-+        paste::item! {
-+            #[no_mangle]
-+            pub extern "C" fn [<$cb_target_ident>](
-+                raw_e: *const libc::c_void,
-+                raw_data: *const libc::c_void,
-+            ) -> i32 {
-+                let e = EntryRef::new(raw_e);
-+                let data_ptr = raw_data as *const _;
-+                let data = unsafe { &(*data_ptr) };
-+                match $cb_mod_ident(e, data) {
-+                    Ok(_) => LDAPError::Success as i32,
-+                    Err(e) => e as i32,
-+                }
-+            }
-+        } // end paste
-+    };
-+} // end macro
-diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
-new file mode 100644
-index 000000000..b69ce1680
---- /dev/null
-+++ b/src/slapi_r_plugin/src/pblock.rs
-@@ -0,0 +1,275 @@
-+use libc;
-+use std::ops::{Deref, DerefMut};
-+use std::os::raw::c_char;
-+use std::ptr;
-+
-+use crate::backend::BackendRef;
-+use crate::constants::{PblockType, PluginFnType, PluginVersion};
-+use crate::entry::EntryRef;
-+pub use crate::log::{log_error, ErrorLevel};
-+
-+extern "C" {
-+    fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
-+    fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
-+    fn slapi_pblock_new() -> *const libc::c_void;
-+}
-+
-+pub struct Pblock {
-+    value: PblockRef,
-+}
-+
-+impl Pblock {
-+    pub fn new() -> Pblock {
-+        let raw_pb = unsafe { slapi_pblock_new() };
-+        Pblock {
-+            value: PblockRef { raw_pb },
-+        }
-+    }
-+}
-+
-+impl Deref for Pblock {
-+    type Target = PblockRef;
-+
-+    fn deref(&self) -> &Self::Target {
-+        &self.value
-+    }
-+}
-+
-+impl DerefMut for Pblock {
-+    fn deref_mut(&mut self) -> &mut Self::Target {
-+        &mut self.value
-+    }
-+}
-+
-+pub struct PblockRef {
-+    raw_pb: *const libc::c_void,
-+}
-+
-+impl PblockRef {
-+    pub fn new(raw_pb: *const libc::c_void) -> Self {
-+        PblockRef { raw_pb }
-+    }
-+
-+    pub unsafe fn as_ptr(&self) -> *const libc::c_void {
-+        self.raw_pb
-+    }
-+
-+    fn set_pb_char_arr_ptr(&mut self, req_type: PblockType, ptr: *const *const c_char) -> i32 {
-+        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
-+        unsafe { slapi_pblock_set(self.raw_pb, req_type as i32, value_ptr) }
-+    }
-+
-+    fn set_pb_char_ptr(&mut self, req_type: PblockType, ptr: *const c_char) -> i32 {
-+        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
-+        unsafe { slapi_pblock_set(self.raw_pb, req_type as i32, value_ptr) }
-+    }
-+
-+    fn set_pb_fn_ptr(
-+        &mut self,
-+        fn_type: PluginFnType,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
-+        unsafe { slapi_pblock_set(self.raw_pb, fn_type as i32, value_ptr) }
-+    }
-+
-+    fn get_value_ptr(&mut self, req_type: PblockType) -> Result<*const libc::c_void, ()> {
-+        let mut value: *mut libc::c_void = ptr::null::<libc::c_void>() as *mut libc::c_void;
-+        let value_ptr: *const libc::c_void = &mut value as *const _ as *const libc::c_void;
-+        match unsafe { slapi_pblock_get(self.raw_pb, req_type as i32, value_ptr) } {
-+            0 => Ok(value),
-+            e => {
-+                log_error!(ErrorLevel::Error, "enable to get from pblock -> {:?}", e);
-+                Err(())
-+            }
-+        }
-+    }
-+
-+    fn get_value_i32(&mut self, req_type: PblockType) -> Result<i32, ()> {
-+        let mut value: i32 = 0;
-+        let value_ptr: *const libc::c_void = &mut value as *const _ as *const libc::c_void;
-+        match unsafe { slapi_pblock_get(self.raw_pb, req_type as i32, value_ptr) } {
-+            0 => Ok(value),
-+            e => {
-+                log_error!(ErrorLevel::Error, "enable to get from pblock -> {:?}", e);
-+                Err(())
-+            }
-+        }
-+    }
-+
-+    pub fn register_start_fn(&mut self, ptr: extern "C" fn(*const libc::c_void) -> i32) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::Start, ptr)
-+    }
-+
-+    pub fn register_close_fn(&mut self, ptr: extern "C" fn(*const libc::c_void) -> i32) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::Close, ptr)
-+    }
-+
-+    pub fn register_betxn_pre_add_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::BeTxnPreAdd, ptr)
-+    }
-+
-+    pub fn register_betxn_pre_modify_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::BeTxnPreModify, ptr)
-+    }
-+
-+    pub fn register_syntax_filter_ava_fn(
-+        &mut self,
-+        ptr: extern "C" fn(
-+            *const core::ffi::c_void,
-+            *const core::ffi::c_void,
-+            *const core::ffi::c_void,
-+            i32,
-+            *mut core::ffi::c_void,
-+        ) -> i32,
-+    ) -> i32 {
-+        // We can't use self.set_pb_fn_ptr here as the fn type sig is different.
-+        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
-+        unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::SyntaxFilterAva as i32, value_ptr) }
-+    }
-+
-+    pub fn register_syntax_values2keys_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::SyntaxValuesToKeys, ptr)
-+    }
-+
-+    pub fn register_syntax_assertion2keys_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::SyntaxAssertion2KeysAva, ptr)
-+    }
-+
-+    pub fn register_syntax_flags_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::SyntaxFlags, ptr)
-+    }
-+
-+    pub fn register_syntax_oid(&mut self, ptr: *const c_char) -> i32 {
-+        self.set_pb_char_ptr(PblockType::SyntaxOid, ptr)
-+    }
-+
-+    pub fn register_syntax_compare_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::SyntaxCompare, ptr)
-+    }
-+
-+    pub fn register_syntax_validate_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::SyntaxValidate, ptr)
-+    }
-+
-+    pub fn register_syntax_names(&mut self, arr_ptr: *const *const c_char) -> i32 {
-+        self.set_pb_char_arr_ptr(PblockType::SyntaxNames, arr_ptr)
-+    }
-+
-+    pub fn register_mr_filter_create_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::MRFilterCreate, ptr)
-+    }
-+
-+    pub fn register_mr_indexer_create_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::MRIndexerCreate, ptr)
-+    }
-+
-+    pub fn register_mr_filter_ava_fn(
-+        &mut self,
-+        ptr: extern "C" fn(
-+            *const core::ffi::c_void,
-+            *const core::ffi::c_void,
-+            *const core::ffi::c_void,
-+            i32,
-+            *mut core::ffi::c_void,
-+        ) -> i32,
-+    ) -> i32 {
-+        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
-+        unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRFilterAva as i32, value_ptr) }
-+    }
-+
-+    pub fn register_mr_filter_sub_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::MRFilterSub, ptr)
-+    }
-+
-+    pub fn register_mr_values2keys_fn(
-+        &mut self,
-+        ptr: extern "C" fn(
-+            *const core::ffi::c_void,
-+            *const core::ffi::c_void,
-+            *mut core::ffi::c_void,
-+            i32,
-+        ) -> i32,
-+    ) -> i32 {
-+        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
-+        unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRValuesToKeys as i32, value_ptr) }
-+    }
-+
-+    pub fn register_mr_assertion2keys_ava_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::MRAssertionToKeysAva, ptr)
-+    }
-+
-+    pub fn register_mr_assertion2keys_sub_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void) -> i32,
-+    ) -> i32 {
-+        self.set_pb_fn_ptr(PluginFnType::MRAssertionToKeysSub, ptr)
-+    }
-+
-+    pub fn register_mr_compare_fn(
-+        &mut self,
-+        ptr: extern "C" fn(*const libc::c_void, *const libc::c_void) -> i32,
-+    ) -> i32 {
-+        let value_ptr: *const libc::c_void = ptr as *const libc::c_void;
-+        unsafe { slapi_pblock_set(self.raw_pb, PluginFnType::MRCompare as i32, value_ptr) }
-+    }
-+
-+    pub fn register_mr_names(&mut self, arr_ptr: *const *const c_char) -> i32 {
-+        self.set_pb_char_arr_ptr(PblockType::MRNames, arr_ptr)
-+    }
-+
-+    pub fn get_op_add_entryref(&mut self) -> Result<EntryRef, ()> {
-+        self.get_value_ptr(PblockType::AddEntry)
-+            .map(|ptr| EntryRef::new(ptr))
-+    }
-+
-+    pub fn set_plugin_version(&mut self, vers: PluginVersion) -> i32 {
-+        self.set_pb_char_ptr(PblockType::Version, vers.to_char_ptr())
-+    }
-+
-+    pub fn set_op_backend(&mut self, be: &BackendRef) -> i32 {
-+        unsafe { slapi_pblock_set(self.raw_pb, PblockType::Backend as i32, be.as_ptr()) }
-+    }
-+
-+    pub fn get_plugin_identity(&mut self) -> *const libc::c_void {
-+        self.get_value_ptr(PblockType::Identity)
-+            .unwrap_or(std::ptr::null())
-+    }
-+
-+    pub fn get_op_result(&mut self) -> i32 {
-+        self.get_value_i32(PblockType::OpResult).unwrap_or(-1)
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/plugin.rs b/src/slapi_r_plugin/src/plugin.rs
-new file mode 100644
-index 000000000..bf47779bc
---- /dev/null
-+++ b/src/slapi_r_plugin/src/plugin.rs
-@@ -0,0 +1,117 @@
-+use crate::constants::{PluginType, PLUGIN_DEFAULT_PRECEDENCE};
-+use crate::dn::Sdn;
-+use crate::entry::EntryRef;
-+use crate::error::LDAPError;
-+use crate::error::PluginError;
-+use crate::pblock::PblockRef;
-+use crate::task::Task;
-+use libc;
-+use std::ffi::CString;
-+use std::os::raw::c_char;
-+use std::ptr;
-+
-+extern "C" {
-+    fn slapi_register_plugin_ext(
-+        plugintype: *const c_char,
-+        enabled: i32,
-+        initsymbol: *const c_char,
-+        initfunc: *const libc::c_void,
-+        name: *const c_char,
-+        argv: *const *const c_char,
-+        group_identity: *const libc::c_void,
-+        precedence: i32,
-+    ) -> i32;
-+}
-+
-+pub struct PluginIdRef {
-+    pub raw_pid: *const libc::c_void,
-+}
-+
-+pub fn register_plugin_ext(
-+    ptype: PluginType,
-+    plugname: &str,
-+    initfnname: &str,
-+    initfn: extern "C" fn(*const libc::c_void) -> i32,
-+) -> i32 {
-+    let c_plugname = match CString::new(plugname) {
-+        Ok(c) => c,
-+        Err(_) => return 1,
-+    };
-+    let c_initfnname = match CString::new(initfnname) {
-+        Ok(c) => c,
-+        Err(_) => return 1,
-+    };
-+    let argv = [c_plugname.as_ptr(), ptr::null()];
-+    let value_ptr: *const libc::c_void = initfn as *const libc::c_void;
-+
-+    unsafe {
-+        slapi_register_plugin_ext(
-+            ptype.to_char_ptr(),
-+            1,
-+            c_initfnname.as_ptr(),
-+            value_ptr,
-+            c_plugname.as_ptr(),
-+            &argv as *const *const c_char,
-+            ptr::null(),
-+            PLUGIN_DEFAULT_PRECEDENCE,
-+        )
-+    }
-+}
-+
-+pub trait SlapiPlugin3 {
-+    // We require a newer rust for default associated types.
-+    // type TaskData = ();
-+    type TaskData;
-+
-+    fn has_pre_modify() -> bool {
-+        false
-+    }
-+
-+    fn has_post_modify() -> bool {
-+        false
-+    }
-+
-+    fn has_pre_add() -> bool {
-+        false
-+    }
-+
-+    fn has_post_add() -> bool {
-+        false
-+    }
-+
-+    fn has_betxn_pre_modify() -> bool {
-+        false
-+    }
-+
-+    fn has_betxn_pre_add() -> bool {
-+        false
-+    }
-+
-+    fn has_task_handler() -> Option<&'static str> {
-+        None
-+    }
-+
-+    fn start(_pb: &mut PblockRef) -> Result<(), PluginError>;
-+
-+    fn close(_pb: &mut PblockRef) -> Result<(), PluginError>;
-+
-+    fn betxn_pre_modify(_pb: &mut PblockRef) -> Result<(), PluginError> {
-+        Err(PluginError::Unimplemented)
-+    }
-+
-+    fn betxn_pre_add(_pb: &mut PblockRef) -> Result<(), PluginError> {
-+        Err(PluginError::Unimplemented)
-+    }
-+
-+    fn task_validate(_e: &EntryRef) -> Result<Self::TaskData, LDAPError> {
-+        Err(LDAPError::Other)
-+    }
-+
-+    fn task_be_dn_hint(_data: &Self::TaskData) -> Option<Sdn> {
-+        None
-+    }
-+
-+    fn task_handler(_task: &Task, _data: Self::TaskData) -> Result<Self::TaskData, PluginError> {
-+        Err(PluginError::Unimplemented)
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/search.rs b/src/slapi_r_plugin/src/search.rs
-new file mode 100644
-index 000000000..e0e2a1fd7
---- /dev/null
-+++ b/src/slapi_r_plugin/src/search.rs
-@@ -0,0 +1,127 @@
-+use crate::dn::SdnRef;
-+use crate::error::{LDAPError, PluginError};
-+use crate::pblock::Pblock;
-+use crate::plugin::PluginIdRef;
-+use std::ffi::CString;
-+use std::ops::Deref;
-+use std::os::raw::c_char;
-+
-+extern "C" {
-+    fn slapi_search_internal_set_pb_ext(
-+        pb: *const libc::c_void,
-+        base: *const libc::c_void,
-+        scope: i32,
-+        filter: *const c_char,
-+        attrs: *const *const c_char,
-+        attrsonly: i32,
-+        controls: *const *const libc::c_void,
-+        uniqueid: *const c_char,
-+        plugin_ident: *const libc::c_void,
-+        op_flags: i32,
-+    );
-+    fn slapi_search_internal_callback_pb(
-+        pb: *const libc::c_void,
-+        cb_data: *const libc::c_void,
-+        cb_result_ptr: *const libc::c_void,
-+        cb_entry_ptr: *const libc::c_void,
-+        cb_referral_ptr: *const libc::c_void,
-+    ) -> i32;
-+}
-+
-+#[derive(Debug)]
-+#[repr(i32)]
-+pub enum SearchScope {
-+    Base = 0,
-+    Onelevel = 1,
-+    Subtree = 2,
-+}
-+
-+enum SearchType {
-+    InternalMapEntry(
-+        extern "C" fn(*const core::ffi::c_void, *const core::ffi::c_void) -> i32,
-+        *const libc::c_void,
-+    ),
-+    // InternalMapResult
-+    // InternalMapReferral
-+}
-+
-+pub struct Search {
-+    pb: Pblock,
-+    // This is so that the char * to the pb lives long enough as ds won't clone it.
-+    filter: Option<CString>,
-+    stype: SearchType,
-+}
-+
-+pub struct SearchResult {
-+    pb: Pblock,
-+}
-+
-+impl Search {
-+    pub fn new_map_entry<T>(
-+        basedn: &SdnRef,
-+        scope: SearchScope,
-+        filter: &str,
-+        plugin_id: PluginIdRef,
-+        cbdata: &T,
-+        mapfn: extern "C" fn(*const libc::c_void, *const libc::c_void) -> i32,
-+    ) -> Result<Self, PluginError>
-+    where
-+        T: Send,
-+    {
-+        // Configure a search based on the requested type.
-+        let pb = Pblock::new();
-+        let raw_filter = CString::new(filter).map_err(|_| PluginError::InvalidFilter)?;
-+
-+        unsafe {
-+            slapi_search_internal_set_pb_ext(
-+                pb.deref().as_ptr(),
-+                basedn.as_ptr(),
-+                scope as i32,
-+                raw_filter.as_ptr(),
-+                std::ptr::null(),
-+                0,
-+                std::ptr::null(),
-+                std::ptr::null(),
-+                plugin_id.raw_pid,
-+                0,
-+            )
-+        };
-+
-+        Ok(Search {
-+            pb,
-+            filter: Some(raw_filter),
-+            stype: SearchType::InternalMapEntry(mapfn, cbdata as *const _ as *const libc::c_void),
-+        })
-+    }
-+
-+    // Consume self, do the search
-+    pub fn execute(self) -> Result<SearchResult, LDAPError> {
-+        // Deconstruct self
-+        let Search {
-+            mut pb,
-+            filter: _filter,
-+            stype,
-+        } = self;
-+
-+        // run the search based on the type.
-+        match stype {
-+            SearchType::InternalMapEntry(mapfn, cbdata) => unsafe {
-+                slapi_search_internal_callback_pb(
-+                    pb.deref().as_ptr(),
-+                    cbdata,
-+                    std::ptr::null(),
-+                    mapfn as *const libc::c_void,
-+                    std::ptr::null(),
-+                );
-+            },
-+        };
-+
-+        // now check the result, and map to what we need.
-+        let result = pb.get_op_result();
-+
-+        match result {
-+            0 => Ok(SearchResult { pb }),
-+            _e => Err(LDAPError::from(result)),
-+        }
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs
-new file mode 100644
-index 000000000..e7d5c01bd
---- /dev/null
-+++ b/src/slapi_r_plugin/src/syntax_plugin.rs
-@@ -0,0 +1,169 @@
-+use crate::ber::BerValRef;
-+// use crate::constants::FilterType;
-+use crate::error::PluginError;
-+use crate::pblock::PblockRef;
-+use crate::value::{ValueArray, ValueArrayRef};
-+use std::cmp::Ordering;
-+use std::ffi::CString;
-+use std::iter::once;
-+use std::os::raw::c_char;
-+use std::ptr;
-+
-+// need a call to slapi_register_plugin_ext
-+
-+extern "C" {
-+    fn slapi_matchingrule_register(mr: *const slapi_matchingRuleEntry) -> i32;
-+}
-+
-+#[repr(C)]
-+struct slapi_matchingRuleEntry {
-+    mr_oid: *const c_char,
-+    _mr_oidalias: *const c_char,
-+    mr_name: *const c_char,
-+    mr_desc: *const c_char,
-+    mr_syntax: *const c_char,
-+    _mr_obsolete: i32, // unused
-+    mr_compat_syntax: *const *const c_char,
-+}
-+
-+pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char {
-+    let n = CString::new(name)
-+        .expect("An invalid string has been hardcoded!")
-+        .into_boxed_c_str();
-+    let n_ptr = n.as_ptr();
-+    // Now we intentionally leak the name here, and the pointer will remain valid.
-+    Box::leak(n);
-+    n_ptr
-+}
-+
-+pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char {
-+    let n_arr: Vec<CString> = names
-+        .iter()
-+        .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!"))
-+        .collect();
-+    let n_arr = n_arr.into_boxed_slice();
-+    let n_ptr_arr: Vec<*const c_char> = n_arr
-+        .iter()
-+        .map(|v| v.as_ptr())
-+        .chain(once(ptr::null()))
-+        .collect();
-+    let n_ptr_arr = n_ptr_arr.into_boxed_slice();
-+
-+    // Now we intentionally leak these names here,
-+    let _r_n_arr = Box::leak(n_arr);
-+    let r_n_ptr_arr = Box::leak(n_ptr_arr);
-+
-+    let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char;
-+    name_ptr
-+}
-+
-+// oid - the oid of the matching rule
-+// name - the name of the mr
-+// desc - description
-+// syntax - the syntax of the attribute we apply to
-+// compat_syntax - extended syntaxes f other attributes we may apply to.
-+pub unsafe fn matchingrule_register(
-+    oid: &str,
-+    name: &str,
-+    desc: &str,
-+    syntax: &str,
-+    compat_syntax: &[&str],
-+) -> i32 {
-+    let oid_ptr = name_to_leaking_char(oid);
-+    let name_ptr = name_to_leaking_char(name);
-+    let desc_ptr = name_to_leaking_char(desc);
-+    let syntax_ptr = name_to_leaking_char(syntax);
-+    let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax);
-+
-+    let new_mr = slapi_matchingRuleEntry {
-+        mr_oid: oid_ptr,
-+        _mr_oidalias: ptr::null(),
-+        mr_name: name_ptr,
-+        mr_desc: desc_ptr,
-+        mr_syntax: syntax_ptr,
-+        _mr_obsolete: 0,
-+        mr_compat_syntax: compat_syntax_ptr,
-+    };
-+
-+    let new_mr_ptr = &new_mr as *const _;
-+    slapi_matchingrule_register(new_mr_ptr)
-+}
-+
-+pub trait SlapiSyntaxPlugin1 {
-+    fn attr_oid() -> &'static str;
-+
-+    fn attr_compat_oids() -> Vec<&'static str>;
-+
-+    fn attr_supported_names() -> Vec<&'static str>;
-+
-+    fn syntax_validate(bval: &BerValRef) -> Result<(), PluginError>;
-+
-+    fn eq_mr_oid() -> &'static str;
-+
-+    fn eq_mr_name() -> &'static str;
-+
-+    fn eq_mr_desc() -> &'static str;
-+
-+    fn eq_mr_supported_names() -> Vec<&'static str>;
-+
-+    fn filter_ava_eq(
-+        _pb: &mut PblockRef,
-+        _bval_filter: &BerValRef,
-+        _vals: &ValueArrayRef,
-+    ) -> Result<bool, PluginError> {
-+        Ok(false)
-+    }
-+
-+    fn eq_mr_filter_values2keys(
-+        _pb: &mut PblockRef,
-+        _vals: &ValueArrayRef,
-+    ) -> Result<ValueArray, PluginError>;
-+}
-+
-+pub trait SlapiOrdMr: SlapiSyntaxPlugin1 {
-+    fn ord_mr_oid() -> Option<&'static str> {
-+        None
-+    }
-+
-+    fn ord_mr_name() -> &'static str {
-+        panic!("Unimplemented ord_mr_name for SlapiOrdMr")
-+    }
-+
-+    fn ord_mr_desc() -> &'static str {
-+        panic!("Unimplemented ord_mr_desc for SlapiOrdMr")
-+    }
-+
-+    fn ord_mr_supported_names() -> Vec<&'static str> {
-+        panic!("Unimplemented ord_mr_supported_names for SlapiOrdMr")
-+    }
-+
-+    fn filter_ava_ord(
-+        _pb: &mut PblockRef,
-+        _bval_filter: &BerValRef,
-+        _vals: &ValueArrayRef,
-+    ) -> Result<Option<Ordering>, PluginError> {
-+        Ok(None)
-+    }
-+
-+    fn filter_compare(_a: &BerValRef, _b: &BerValRef) -> Ordering {
-+        panic!("Unimplemented filter_compare")
-+    }
-+}
-+
-+pub trait SlapiSubMr: SlapiSyntaxPlugin1 {
-+    fn sub_mr_oid() -> Option<&'static str> {
-+        None
-+    }
-+
-+    fn sub_mr_name() -> &'static str {
-+        panic!("Unimplemented sub_mr_name for SlapiSubMr")
-+    }
-+
-+    fn sub_mr_desc() -> &'static str {
-+        panic!("Unimplemented sub_mr_desc for SlapiSubMr")
-+    }
-+
-+    fn sub_mr_supported_names() -> Vec<&'static str> {
-+        panic!("Unimplemented sub_mr_supported_names for SlapiSubMr")
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/task.rs b/src/slapi_r_plugin/src/task.rs
-new file mode 100644
-index 000000000..251ae4d82
---- /dev/null
-+++ b/src/slapi_r_plugin/src/task.rs
-@@ -0,0 +1,148 @@
-+use crate::constants::LDAP_SUCCESS;
-+use crate::entry::EntryRef;
-+use crate::pblock::PblockRef;
-+use std::ffi::CString;
-+use std::os::raw::c_char;
-+use std::thread;
-+use std::time::Duration;
-+
-+extern "C" {
-+    fn slapi_plugin_new_task(ndn: *const c_char, arg: *const libc::c_void) -> *const libc::c_void;
-+    fn slapi_task_dec_refcount(task: *const libc::c_void);
-+    fn slapi_task_inc_refcount(task: *const libc::c_void);
-+    fn slapi_task_get_refcount(task: *const libc::c_void) -> i32;
-+    fn slapi_task_begin(task: *const libc::c_void, rc: i32);
-+    fn slapi_task_finish(task: *const libc::c_void, rc: i32);
-+
-+    fn slapi_plugin_task_register_handler(
-+        ident: *const c_char,
-+        cb: extern "C" fn(
-+            *const libc::c_void,
-+            *const libc::c_void,
-+            *const libc::c_void,
-+            *mut i32,
-+            *mut c_char,
-+            *const libc::c_void,
-+        ) -> i32,
-+        pb: *const libc::c_void,
-+    ) -> i32;
-+    fn slapi_plugin_task_unregister_handler(
-+        ident: *const c_char,
-+        cb: extern "C" fn(
-+            *const libc::c_void,
-+            *const libc::c_void,
-+            *const libc::c_void,
-+            *mut i32,
-+            *mut c_char,
-+            *const libc::c_void,
-+        ) -> i32,
-+    ) -> i32;
-+    fn slapi_task_set_destructor_fn(
-+        task: *const libc::c_void,
-+        cb: extern "C" fn(*const libc::c_void),
-+    );
-+}
-+
-+pub struct TaskRef {
-+    raw_task: *const libc::c_void,
-+}
-+
-+pub struct Task {
-+    value: TaskRef,
-+}
-+
-+// Because raw pointers are not send, but we need to send the task to a thread
-+// as part of the task thread spawn, we need to convince the compiler this
-+// action is okay. It's probably not because C is terrible, BUT provided the
-+// server and framework only touch the ref count, we are okay.
-+unsafe impl Send for Task {}
-+
-+pub fn task_register_handler_fn(
-+    ident: &'static str,
-+    cb: extern "C" fn(
-+        *const libc::c_void,
-+        *const libc::c_void,
-+        *const libc::c_void,
-+        *mut i32,
-+        *mut c_char,
-+        *const libc::c_void,
-+    ) -> i32,
-+    pb: &mut PblockRef,
-+) -> i32 {
-+    let cstr = CString::new(ident).expect("Invalid ident provided");
-+    unsafe { slapi_plugin_task_register_handler(cstr.as_ptr(), cb, pb.as_ptr()) }
-+}
-+
-+pub fn task_unregister_handler_fn(
-+    ident: &'static str,
-+    cb: extern "C" fn(
-+        *const libc::c_void,
-+        *const libc::c_void,
-+        *const libc::c_void,
-+        *mut i32,
-+        *mut c_char,
-+        *const libc::c_void,
-+    ) -> i32,
-+) -> i32 {
-+    let cstr = CString::new(ident).expect("Invalid ident provided");
-+    unsafe { slapi_plugin_task_unregister_handler(cstr.as_ptr(), cb) }
-+}
-+
-+impl Task {
-+    pub fn new(e: &EntryRef, arg: *const libc::c_void) -> Self {
-+        let sdn = e.get_sdnref();
-+        let ndn = unsafe { sdn.as_ndnref() };
-+        let raw_task = unsafe { slapi_plugin_new_task(ndn.as_ptr(), arg) };
-+        unsafe { slapi_task_inc_refcount(raw_task) };
-+        Task {
-+            value: TaskRef { raw_task },
-+        }
-+    }
-+
-+    pub fn begin(&self) {
-+        // Indicate we begin
-+        unsafe { slapi_task_begin(self.value.raw_task, 1) }
-+    }
-+
-+    pub fn register_destructor_fn(&mut self, cb: extern "C" fn(*const libc::c_void)) {
-+        unsafe {
-+            slapi_task_set_destructor_fn(self.value.raw_task, cb);
-+        }
-+    }
-+
-+    pub fn success(self) {
-+        unsafe {
-+            slapi_task_finish(self.value.raw_task, LDAP_SUCCESS);
-+        }
-+    }
-+
-+    pub fn error(self, rc: i32) {
-+        unsafe { slapi_task_finish(self.value.raw_task, rc) };
-+    }
-+}
-+
-+impl Drop for Task {
-+    fn drop(&mut self) {
-+        unsafe {
-+            slapi_task_dec_refcount(self.value.raw_task);
-+        }
-+    }
-+}
-+
-+impl TaskRef {
-+    pub fn new(raw_task: *const libc::c_void) -> Self {
-+        TaskRef { raw_task }
-+    }
-+
-+    pub fn block(&self) {
-+        // wait for the refcount to go to 0.
-+        let d = Duration::from_millis(250);
-+        loop {
-+            if unsafe { slapi_task_get_refcount(self.raw_task) } > 0 {
-+                thread::sleep(d);
-+            } else {
-+                return;
-+            }
-+        }
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
-new file mode 100644
-index 000000000..5a40dd279
---- /dev/null
-+++ b/src/slapi_r_plugin/src/value.rs
-@@ -0,0 +1,235 @@
-+use crate::ber::{ol_berval, BerValRef};
-+use crate::dn::Sdn;
-+use std::convert::{From, TryFrom};
-+use std::ffi::CString;
-+use std::iter::once;
-+use std::iter::FromIterator;
-+use std::mem;
-+use std::ops::Deref;
-+use std::ptr;
-+use uuid::Uuid;
-+
-+extern "C" {
-+    fn slapi_value_new() -> *mut slapi_value;
-+    fn slapi_value_free(v: *mut *const libc::c_void);
-+}
-+
-+#[repr(C)]
-+/// From ./ldap/servers/slapd/slap.h
-+pub struct slapi_value {
-+    bv: ol_berval,
-+    v_csnset: *const libc::c_void,
-+    v_flags: u32,
-+}
-+
-+pub struct ValueArrayRefIter<'a> {
-+    idx: isize,
-+    va_ref: &'a ValueArrayRef,
-+}
-+
-+impl<'a> Iterator for ValueArrayRefIter<'a> {
-+    type Item = ValueRef;
-+
-+    #[inline]
-+    fn next(&mut self) -> Option<Self::Item> {
-+        // So long as va_ref.raw_slapi_val + offset != NULL, continue.
-+        // this is so wildly unsafe, but you know, that's just daily life of C anyway ...
-+        unsafe {
-+            let n_ptr: *const slapi_value = *(self.va_ref.raw_slapi_val.offset(self.idx));
-+            if n_ptr.is_null() {
-+                None
-+            } else {
-+                // Advance the iter.
-+                self.idx = self.idx + 1;
-+                let raw_berval: *const ol_berval = &(*n_ptr).bv as *const _;
-+                Some(ValueRef {
-+                    raw_slapi_val: n_ptr,
-+                    bvr: BerValRef { raw_berval },
-+                })
-+            }
-+        }
-+    }
-+}
-+
-+pub struct ValueArrayRef {
-+    raw_slapi_val: *const *const slapi_value,
-+}
-+
-+impl ValueArrayRef {
-+    pub fn new(raw_slapi_val: *const libc::c_void) -> Self {
-+        let raw_slapi_val = raw_slapi_val as *const _ as *const *const slapi_value;
-+        ValueArrayRef { raw_slapi_val }
-+    }
-+
-+    pub fn iter(&self) -> ValueArrayRefIter {
-+        ValueArrayRefIter {
-+            idx: 0,
-+            va_ref: &self,
-+        }
-+    }
-+
-+    pub fn first(&self) -> Option<ValueRef> {
-+        self.iter().next()
-+    }
-+}
-+
-+pub struct ValueArray {
-+    data: Vec<*mut slapi_value>,
-+    vrf: ValueArrayRef,
-+}
-+
-+impl Deref for ValueArray {
-+    type Target = ValueArrayRef;
-+
-+    fn deref(&self) -> &Self::Target {
-+        &self.vrf
-+    }
-+}
-+
-+impl ValueArray {
-+    /// Take ownership of this value array, returning the pointer to the inner memory
-+    /// and forgetting about it for ourself. This prevents the drop handler from freeing
-+    /// the slapi_value, ie we are giving this to the 389-ds framework to manage from now.
-+    pub unsafe fn take_ownership(mut self) -> *const *const slapi_value {
-+        let mut vs = Vec::new();
-+        mem::swap(&mut self.data, &mut vs);
-+        let bs = vs.into_boxed_slice();
-+        Box::leak(bs) as *const _ as *const *const slapi_value
-+    }
-+}
-+
-+impl FromIterator<Value> for ValueArray {
-+    fn from_iter<I: IntoIterator<Item = Value>>(iter: I) -> Self {
-+        let data: Vec<*mut slapi_value> = iter
-+            .into_iter()
-+            .map(|v| unsafe { v.take_ownership() })
-+            .chain(once(ptr::null_mut() as *mut slapi_value))
-+            .collect();
-+        let vrf = ValueArrayRef {
-+            raw_slapi_val: data.as_ptr() as *const *const slapi_value,
-+        };
-+        ValueArray { data, vrf }
-+    }
-+}
-+
-+impl Drop for ValueArray {
-+    fn drop(&mut self) {
-+        self.data.drain(0..).for_each(|mut v| unsafe {
-+            slapi_value_free(&mut v as *mut _ as *mut *const libc::c_void);
-+        })
-+    }
-+}
-+
-+#[derive(Debug)]
-+pub struct ValueRef {
-+    raw_slapi_val: *const slapi_value,
-+    bvr: BerValRef,
-+}
-+
-+impl ValueRef {
-+    pub(crate) unsafe fn as_ptr(&self) -> *const slapi_value {
-+        // This is unsafe as the *const may outlive the value ref.
-+        self.raw_slapi_val
-+    }
-+}
-+
-+pub struct Value {
-+    value: ValueRef,
-+}
-+
-+impl Value {
-+    pub unsafe fn take_ownership(mut self) -> *mut slapi_value {
-+        let mut n_ptr = ptr::null();
-+        mem::swap(&mut self.value.raw_slapi_val, &mut n_ptr);
-+        n_ptr as *mut slapi_value
-+        // Now drop will run and not care.
-+    }
-+}
-+
-+impl Drop for Value {
-+    fn drop(&mut self) {
-+        if self.value.raw_slapi_val != ptr::null() {
-+            // free it
-+            unsafe {
-+                slapi_value_free(
-+                    &mut self.value.raw_slapi_val as *mut _ as *mut *const libc::c_void,
-+                );
-+            }
-+        }
-+    }
-+}
-+
-+impl Deref for Value {
-+    type Target = ValueRef;
-+
-+    fn deref(&self) -> &Self::Target {
-+        &self.value
-+    }
-+}
-+
-+impl From<&Uuid> for Value {
-+    fn from(u: &Uuid) -> Self {
-+        // turn the uuid to a str
-+        let u_str = u.to_hyphenated().to_string();
-+        let len = u_str.len();
-+        let cstr = CString::new(u_str)
-+            .expect("Invalid uuid, should never occur!")
-+            .into_boxed_c_str();
-+        let s_ptr = cstr.as_ptr();
-+        Box::leak(cstr);
-+
-+        let mut v = unsafe { slapi_value_new() };
-+        unsafe {
-+            (*v).bv.len = len;
-+            (*v).bv.data = s_ptr as *const u8;
-+        }
-+
-+        Value {
-+            value: ValueRef::new(v as *const libc::c_void),
-+        }
-+    }
-+}
-+
-+impl ValueRef {
-+    pub fn new(raw_slapi_val: *const libc::c_void) -> Self {
-+        let raw_slapi_val = raw_slapi_val as *const _ as *const slapi_value;
-+        let raw_berval: *const ol_berval = unsafe { &(*raw_slapi_val).bv as *const _ };
-+        ValueRef {
-+            raw_slapi_val,
-+            bvr: BerValRef { raw_berval },
-+        }
-+    }
-+}
-+
-+impl TryFrom<&ValueRef> for String {
-+    type Error = ();
-+
-+    fn try_from(value: &ValueRef) -> Result<Self, Self::Error> {
-+        value.bvr.into_string().ok_or(())
-+    }
-+}
-+
-+impl TryFrom<&ValueRef> for Sdn {
-+    type Error = ();
-+
-+    fn try_from(value: &ValueRef) -> Result<Self, Self::Error> {
-+        // We need to do a middle step of moving through a cstring as
-+        // bervals may not always have a trailing NULL, and sdn expects one.
-+        let cdn = value.bvr.into_cstring().ok_or(())?;
-+        Ok(cdn.as_c_str().into())
-+    }
-+}
-+
-+impl AsRef<ValueRef> for ValueRef {
-+    fn as_ref(&self) -> &ValueRef {
-+        &self
-+    }
-+}
-+
-+impl Deref for ValueRef {
-+    type Target = BerValRef;
-+
-+    fn deref(&self) -> &Self::Target {
-+        &self.bvr
-+    }
-+}
--- 
-2.26.3
-
diff --git a/SOURCES/0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch b/SOURCES/0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch
new file mode 100644
index 0000000..0e68f9f
--- /dev/null
+++ b/SOURCES/0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch
@@ -0,0 +1,114 @@
+From d037688c072c4cb84fbf9b2a6cb24927f7950605 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Wed, 20 Oct 2021 10:04:06 -0400
+Subject: [PATCH 04/12] Issue 4956 - Automember allows invalid regex, and does
+ not log proper error
+
+Bug Description:  The server was detecting an invalid automember
+                  regex, but it did not reject it, and it did not
+                  log which regex rule was invalid.
+
+Fix Description:  By properly rejecting the invalid regex will also
+                  trigger the proper error logging to occur.
+
+relates: https://github.com/389ds/389-ds-base/issues/4956
+
+Reviewed by: tbordaz & spichugi(Thanks!!)
+---
+ .../automember_plugin/configuration_test.py   | 49 +++++++++++++++++--
+ ldap/servers/plugins/automember/automember.c  |  1 +
+ 2 files changed, 46 insertions(+), 4 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
+index 0f9cc49dc..4a6b596db 100644
+--- a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
++++ b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
+@@ -1,21 +1,20 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2019 Red Hat, Inc.
++# Copyright (C) 2021 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+ # See LICENSE for details.
+ # --- END COPYRIGHT BLOCK ---
+ 
++import ldap
+ import os
+ import pytest
+-
+ from lib389.topologies import topology_st as topo
+ from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin
+-import ldap
++from lib389._constants import DEFAULT_SUFFIX
+ 
+ pytestmark = pytest.mark.tier1
+ 
+-
+ @pytest.mark.bz834056
+ def test_configuration(topo):
+     """
+@@ -52,6 +51,48 @@ def test_configuration(topo):
+                                               '"cn=SuffDef1,ou=autouserGroups,cn=config" '
+                                               'can not be a child of the plugin config area "cn=config"')
+ 
++def test_invalid_regex(topo):
++    """Test invalid regex is properly reported in the error log
++
++    :id: a6d89f84-ec76-4871-be96-411d051800b1
++    :setup: Standalone Instance
++    :steps:
++        1. Setup automember
++        2. Add invalid regex
++        3. Error log reports useful message
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++    """
++    REGEX_DN = "cn=regex1,cn=testregex,cn=auto membership plugin,cn=plugins,cn=config"
++    REGEX_VALUE = "cn=*invalid*"
++    REGEX_ESC_VALUE = "cn=\\*invalid\\*"
++    GROUP_DN = "cn=demo_group,ou=groups,"  + DEFAULT_SUFFIX
++
++    AutoMembershipPlugin(topo.standalone).remove_all("nsslapd-pluginConfigArea")
++    automemberplugin = AutoMembershipPlugin(topo.standalone)
++
++    automember_prop = {
++        'cn': 'testRegex',
++        'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX,
++        'autoMemberFilter': 'objectclass=*',
++        'autoMemberDefaultGroup': GROUP_DN,
++        'autoMemberGroupingAttr': 'member:dn',
++    }
++    automember_defs = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config")
++    automember_def = automember_defs.create(properties=automember_prop)
++    automember_def.add_regex_rule("regex1", GROUP_DN, include_regex=[REGEX_VALUE])
++
++    automemberplugin.enable()
++    topo.standalone.restart()
++
++    # Check errors log for invalid message
++    ERR_STR1 = "automember_parse_regex_rule - Unable to parse regex rule"
++    ERR_STR2 = f"Skipping invalid inclusive regex rule in rule entry \"{REGEX_DN}\" \\(rule = \"{REGEX_ESC_VALUE}\"\\)"
++    assert topo.standalone.searchErrorsLog(ERR_STR1)
++    assert topo.standalone.searchErrorsLog(ERR_STR2)
++
+ 
+ if __name__ == "__main__":
+     CURRENT_FILE = os.path.realpath(__file__)
+diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
+index 39350ad53..b92b89bd5 100644
+--- a/ldap/servers/plugins/automember/automember.c
++++ b/ldap/servers/plugins/automember/automember.c
+@@ -1217,6 +1217,7 @@ automember_parse_regex_rule(char *rule_string)
+                       "automember_parse_regex_rule - Unable to parse "
+                       "regex rule (invalid regex).  Error \"%s\".\n",
+                       recomp_result ? recomp_result : "unknown");
++        goto bail;
+     }
+ 
+     /* Validation has passed, so create the regex rule struct and fill it in.
+-- 
+2.31.1
+
diff --git a/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch b/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
deleted file mode 100644
index 8416726..0000000
--- a/SOURCES/0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
+++ /dev/null
@@ -1,373 +0,0 @@
-From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001
-From: Firstyear <william.brown@suse.com>
-Date: Wed, 23 Sep 2020 09:19:34 +1000
-Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly
- (#4328)
-
-Bug Description: due to an oversight in how fixup tasks
-worked, the entryuuid fixup task did not work correctly and
-would not persist over restarts.
-
-Fix Description: Correctly implement entryuuid fixup.
-
-fixes: #4326
-
-Author: William Brown <william@blackhats.net.au>
-
-Review by: mreynolds (thanks!)
----
- .../tests/suites/entryuuid/basic_test.py      |  24 +++-
- src/plugins/entryuuid/src/lib.rs              |  43 ++++++-
- src/slapi_r_plugin/src/constants.rs           |   5 +
- src/slapi_r_plugin/src/entry.rs               |   8 ++
- src/slapi_r_plugin/src/lib.rs                 |   2 +
- src/slapi_r_plugin/src/macros.rs              |   2 +-
- src/slapi_r_plugin/src/modify.rs              | 118 ++++++++++++++++++
- src/slapi_r_plugin/src/pblock.rs              |   7 ++
- src/slapi_r_plugin/src/value.rs               |   4 +
- 9 files changed, 206 insertions(+), 7 deletions(-)
- create mode 100644 src/slapi_r_plugin/src/modify.rs
-
-diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
-index beb73701d..4d8a40909 100644
---- a/dirsrvtests/tests/suites/entryuuid/basic_test.py
-+++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
-@@ -12,6 +12,7 @@ import time
- import shutil
- from lib389.idm.user import nsUserAccounts, UserAccounts
- from lib389.idm.account import Accounts
-+from lib389.idm.domain import Domain
- from lib389.topologies import topology_st as topology
- from lib389.backend import Backends
- from lib389.paths import Paths
-@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology):
-         3. Enable the entryuuid plugin
-         4. Run the fixup
-         5. Assert the entryuuid now exists
-+        6. Restart and check they persist
- 
-     :expectedresults:
-         1. Success
-@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology):
-         3. Success
-         4. Success
-         5. Suddenly EntryUUID!
-+        6. Still has EntryUUID!
-     """
-     # 1. Disable the plugin
-     plug = EntryUUIDPlugin(topology.standalone)
-@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology):
-     assert(task.is_complete() and task.get_exit_code() == 0)
-     topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))
- 
--    # 5. Assert the uuid.
--    euuid = account.get_attr_val_utf8('entryUUID')
--    assert(euuid is not None)
-+    # 5.1 Assert the uuid on the user.
-+    euuid_user = account.get_attr_val_utf8('entryUUID')
-+    assert(euuid_user is not None)
-+
-+    # 5.2 Assert it on the domain entry.
-+    domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX)
-+    euuid_domain = domain.get_attr_val_utf8('entryUUID')
-+    assert(euuid_domain is not None)
-+
-+    # Assert it persists after a restart.
-+    topology.standalone.restart()
-+    # 6.1 Assert the uuid on the use.
-+    euuid_user_2 = account.get_attr_val_utf8('entryUUID')
-+    assert(euuid_user_2 == euuid_user)
-+
-+    # 6.2 Assert it on the domain entry.
-+    euuid_domain_2 = domain.get_attr_val_utf8('entryUUID')
-+    assert(euuid_domain_2 == euuid_domain)
- 
-diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
-index 6b5e8d1bb..92977db05 100644
---- a/src/plugins/entryuuid/src/lib.rs
-+++ b/src/plugins/entryuuid/src/lib.rs
-@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid {
-     }
- }
- 
--pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> {
--    assign_uuid(&mut e);
--    Ok(())
-+pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> {
-+    /* Supply a modification to the entry. */
-+    let sdn = e.get_sdnref();
-+
-+    /* Sanity check that entryuuid doesn't already exist */
-+    if e.contains_attr("entryUUID") {
-+        log_error!(
-+            ErrorLevel::Trace,
-+            "skipping fixup for -> {}",
-+            sdn.to_dn_string()
-+        );
-+        return Ok(());
-+    }
-+
-+    // Setup the modifications
-+    let mut mods = SlapiMods::new();
-+
-+    let u: Uuid = Uuid::new_v4();
-+    let uuid_value = Value::from(&u);
-+    let values: ValueArray = std::iter::once(uuid_value).collect();
-+    mods.append(ModType::Replace, "entryUUID", values);
-+
-+    /* */
-+    let lmod = Modify::new(&sdn, mods, plugin_id())?;
-+
-+    match lmod.execute() {
-+        Ok(_) => {
-+            log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
-+            Ok(())
-+        }
-+        Err(e) => {
-+            log_error!(
-+                ErrorLevel::Error,
-+                "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}",
-+                sdn.to_dn_string(),
-+                e
-+            );
-+            Err(PluginError::GenericFailure)
-+        }
-+    }
- }
- 
- #[cfg(test)]
-diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
-index cf76ccbdb..34845c2f4 100644
---- a/src/slapi_r_plugin/src/constants.rs
-+++ b/src/slapi_r_plugin/src/constants.rs
-@@ -5,6 +5,11 @@ use std::os::raw::c_char;
- pub const LDAP_SUCCESS: i32 = 0;
- pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50;
- 
-+#[repr(i32)]
-+pub enum OpFlags {
-+    ByassReferrals = 0x0040_0000,
-+}
-+
- #[repr(i32)]
- /// The set of possible function handles we can register via the pblock. These
- /// values correspond to slapi-plugin.h.
-diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs
-index 034efe692..22ae45189 100644
---- a/src/slapi_r_plugin/src/entry.rs
-+++ b/src/slapi_r_plugin/src/entry.rs
-@@ -70,6 +70,14 @@ impl EntryRef {
-         }
-     }
- 
-+    pub fn contains_attr(&self, name: &str) -> bool {
-+        let cname = CString::new(name).expect("invalid attr name");
-+        let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) };
-+
-+        // If it's null, it's not present, so flip the logic.
-+        !va.is_null()
-+    }
-+
-     pub fn add_value(&mut self, a: &str, v: &ValueRef) {
-         // turn the attr to a c string.
-         // TODO FIX
-diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
-index d7fc22e52..076907bae 100644
---- a/src/slapi_r_plugin/src/lib.rs
-+++ b/src/slapi_r_plugin/src/lib.rs
-@@ -9,6 +9,7 @@ pub mod dn;
- pub mod entry;
- pub mod error;
- pub mod log;
-+pub mod modify;
- pub mod pblock;
- pub mod plugin;
- pub mod search;
-@@ -24,6 +25,7 @@ pub mod prelude {
-     pub use crate::entry::EntryRef;
-     pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
-     pub use crate::log::{log_error, ErrorLevel};
-+    pub use crate::modify::{ModType, Modify, SlapiMods};
-     pub use crate::pblock::{Pblock, PblockRef};
-     pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
-     pub use crate::search::{Search, SearchScope};
-diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
-index 030449632..bc8dfa60f 100644
---- a/src/slapi_r_plugin/src/macros.rs
-+++ b/src/slapi_r_plugin/src/macros.rs
-@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn {
-                 let e = EntryRef::new(raw_e);
-                 let data_ptr = raw_data as *const _;
-                 let data = unsafe { &(*data_ptr) };
--                match $cb_mod_ident(e, data) {
-+                match $cb_mod_ident(&e, data) {
-                     Ok(_) => LDAPError::Success as i32,
-                     Err(e) => e as i32,
-                 }
-diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs
-new file mode 100644
-index 000000000..30864377a
---- /dev/null
-+++ b/src/slapi_r_plugin/src/modify.rs
-@@ -0,0 +1,118 @@
-+use crate::constants::OpFlags;
-+use crate::dn::SdnRef;
-+use crate::error::{LDAPError, PluginError};
-+use crate::pblock::Pblock;
-+use crate::plugin::PluginIdRef;
-+use crate::value::{slapi_value, ValueArray};
-+
-+use std::ffi::CString;
-+use std::ops::{Deref, DerefMut};
-+use std::os::raw::c_char;
-+
-+extern "C" {
-+    fn slapi_modify_internal_set_pb_ext(
-+        pb: *const libc::c_void,
-+        dn: *const libc::c_void,
-+        mods: *const *const libc::c_void,
-+        controls: *const *const libc::c_void,
-+        uniqueid: *const c_char,
-+        plugin_ident: *const libc::c_void,
-+        op_flags: i32,
-+    );
-+    fn slapi_modify_internal_pb(pb: *const libc::c_void);
-+    fn slapi_mods_free(smods: *const *const libc::c_void);
-+    fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void;
-+    fn slapi_mods_new() -> *const libc::c_void;
-+    fn slapi_mods_add_mod_values(
-+        smods: *const libc::c_void,
-+        mtype: i32,
-+        attrtype: *const c_char,
-+        value: *const *const slapi_value,
-+    );
-+}
-+
-+#[derive(Debug)]
-+#[repr(i32)]
-+pub enum ModType {
-+    Add = 0,
-+    Delete = 1,
-+    Replace = 2,
-+}
-+
-+pub struct SlapiMods {
-+    inner: *const libc::c_void,
-+    vas: Vec<ValueArray>,
-+}
-+
-+impl Drop for SlapiMods {
-+    fn drop(&mut self) {
-+        unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) }
-+    }
-+}
-+
-+impl SlapiMods {
-+    pub fn new() -> Self {
-+        SlapiMods {
-+            inner: unsafe { slapi_mods_new() },
-+            vas: Vec::new(),
-+        }
-+    }
-+
-+    pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) {
-+        // We can get the value array pointer here to push to the inner
-+        // because the internal pointers won't change even when we push them
-+        // to the list to preserve their lifetime.
-+        let vas = values.as_ptr();
-+        // We take ownership of this to ensure it lives as least as long as our
-+        // slapimods structure.
-+        self.vas.push(values);
-+        // now we can insert these to the modes.
-+        let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype");
-+        unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) };
-+    }
-+}
-+
-+pub struct Modify {
-+    pb: Pblock,
-+    mods: SlapiMods,
-+}
-+
-+pub struct ModifyResult {
-+    pb: Pblock,
-+}
-+
-+impl Modify {
-+    pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result<Self, PluginError> {
-+        let pb = Pblock::new();
-+        let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) };
-+        // OP_FLAG_ACTION_LOG_ACCESS
-+
-+        unsafe {
-+            slapi_modify_internal_set_pb_ext(
-+                pb.deref().as_ptr(),
-+                dn.as_ptr(),
-+                lmods,
-+                std::ptr::null(),
-+                std::ptr::null(),
-+                plugin_id.raw_pid,
-+                OpFlags::ByassReferrals as i32,
-+            )
-+        };
-+
-+        Ok(Modify { pb, mods })
-+    }
-+
-+    pub fn execute(self) -> Result<ModifyResult, LDAPError> {
-+        let Modify {
-+            mut pb,
-+            mods: _mods,
-+        } = self;
-+        unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) };
-+        let result = pb.get_op_result();
-+
-+        match result {
-+            0 => Ok(ModifyResult { pb }),
-+            _e => Err(LDAPError::from(result)),
-+        }
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
-index b69ce1680..0f83914f3 100644
---- a/src/slapi_r_plugin/src/pblock.rs
-+++ b/src/slapi_r_plugin/src/pblock.rs
-@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel};
- extern "C" {
-     fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
-     fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
-+    fn slapi_pblock_destroy(pb: *const libc::c_void);
-     fn slapi_pblock_new() -> *const libc::c_void;
- }
- 
-@@ -41,6 +42,12 @@ impl DerefMut for Pblock {
-     }
- }
- 
-+impl Drop for Pblock {
-+    fn drop(&mut self) {
-+        unsafe { slapi_pblock_destroy(self.value.raw_pb) }
-+    }
-+}
-+
- pub struct PblockRef {
-     raw_pb: *const libc::c_void,
- }
-diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
-index 5a40dd279..46246837a 100644
---- a/src/slapi_r_plugin/src/value.rs
-+++ b/src/slapi_r_plugin/src/value.rs
-@@ -96,6 +96,10 @@ impl ValueArray {
-         let bs = vs.into_boxed_slice();
-         Box::leak(bs) as *const _ as *const *const slapi_value
-     }
-+
-+    pub fn as_ptr(&self) -> *const *const slapi_value {
-+        self.data.as_ptr() as *const *const slapi_value
-+    }
- }
- 
- impl FromIterator<Value> for ValueArray {
--- 
-2.26.3
-
diff --git a/SOURCES/0005-Issue-4092-systemd-tmpfiles-warnings.patch b/SOURCES/0005-Issue-4092-systemd-tmpfiles-warnings.patch
new file mode 100644
index 0000000..8a54d97
--- /dev/null
+++ b/SOURCES/0005-Issue-4092-systemd-tmpfiles-warnings.patch
@@ -0,0 +1,245 @@
+From 9c08a053938eb28821fad7d0850c046ef2ed44c4 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Wed, 9 Dec 2020 16:16:30 -0500
+Subject: [PATCH 05/12] Issue 4092 - systemd-tmpfiles warnings
+
+Bug Description:
+
+systemd-tmpfiles warns about legacy paths in our tmpfiles configs.
+Using /var/run also introduces a race condition, see the following
+issue https://pagure.io/389-ds-base/issue/47429
+
+Fix Description:
+
+Instead of using @localstatedir@/run use @localrundir@ which was
+introduced in #850.
+
+Relates: https://github.com/389ds/389-ds-base/issues/766
+Fixes: https://github.com/389ds/389-ds-base/issues/4092
+
+Reviewed by: vashirov & firstyear(Thanks!)
+---
+ Makefile.am                                  |  4 ++--
+ configure.ac                                 | 10 ++++++++--
+ dirsrvtests/tests/suites/basic/basic_test.py |  3 ++-
+ ldap/admin/src/defaults.inf.in               |  8 ++++----
+ ldap/servers/snmp/main.c                     |  8 ++++----
+ src/lib389/lib389/__init__.py                |  3 +++
+ src/lib389/lib389/instance/options.py        |  7 ++++++-
+ src/lib389/lib389/instance/remove.py         | 13 ++++++++-----
+ src/lib389/lib389/instance/setup.py          | 10 ++++++++--
+ 9 files changed, 45 insertions(+), 21 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 36434cf17..fc5a6a7d1 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -141,8 +141,8 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd
+ 	-DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \
+ 	-DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \
+ 	-DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \
+-	-DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\""
+-
++	-DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" \
++	-DLOCALRUNDIR="\"$(localrundir)\""
+ # Now that we have all our defines in place, setup the CPPFLAGS
+ 
+ # These flags are the "must have" for all components
+diff --git a/configure.ac b/configure.ac
+index 61bf35e4a..9845beb7d 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -418,7 +418,14 @@ fi
+ 
+ m4_include(m4/fhs.m4)
+ 
+-localrundir='/run'
++# /run directory path
++AC_ARG_WITH([localrundir],
++            AS_HELP_STRING([--with-localrundir=DIR],
++                           [Runtime data directory]),
++            [localrundir=$with_localrundir],
++            [localrundir="/run"])
++AC_SUBST([localrundir])
++
+ cockpitdir=/389-console
+ 
+ # installation paths - by default, we store everything
+@@ -899,7 +906,6 @@ AC_SUBST(ldaplib_defs)
+ AC_SUBST(ldaptool_bindir)
+ AC_SUBST(ldaptool_opts)
+ AC_SUBST(plainldif_opts)
+-AC_SUBST(localrundir)
+ 
+ AC_SUBST(brand)
+ AC_SUBST(capbrand)
+diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
+index 41726f073..7e80c443b 100644
+--- a/dirsrvtests/tests/suites/basic/basic_test.py
++++ b/dirsrvtests/tests/suites/basic/basic_test.py
+@@ -901,7 +901,8 @@ def test_basic_ldapagent(topology_st, import_example_ldif):
+     # Remember, this is *forking*
+     check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file])
+     # First kill any previous agents ....
+-    pidpath = os.path.join(var_dir, 'run/ldap-agent.pid')
++    run_dir = topology_st.standalone.get_run_dir()
++    pidpath = os.path.join(run_dir, 'ldap-agent.pid')
+     pid = None
+     with open(pidpath, 'r') as pf:
+         pid = pf.readlines()[0].strip()
+diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
+index d5f504591..e02248b89 100644
+--- a/ldap/admin/src/defaults.inf.in
++++ b/ldap/admin/src/defaults.inf.in
+@@ -35,12 +35,12 @@ sysconf_dir = @sysconfdir@
+ initconfig_dir = @initconfigdir@
+ config_dir = @instconfigdir@/slapd-{instance_name}
+ local_state_dir = @localstatedir@
+-run_dir = @localstatedir@/run/dirsrv
++run_dir = @localrundir@
+ # This is the expected location of ldapi.
+-ldapi = @localstatedir@/run/slapd-{instance_name}.socket
++ldapi = @localrundir@/slapd-{instance_name}.socket
++pid_file = @localrundir@/slapd-{instance_name}.pid
+ ldapi_listen = on
+ ldapi_autobind = on
+-pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid
+ inst_dir = @serverdir@/slapd-{instance_name}
+ plugin_dir = @serverplugindir@
+ system_schema_dir = @systemschemadir@
+@@ -54,7 +54,7 @@ root_dn = cn=Directory Manager
+ schema_dir = @instconfigdir@/slapd-{instance_name}/schema
+ cert_dir = @instconfigdir@/slapd-{instance_name}
+ 
+-lock_dir = @localstatedir@/lock/dirsrv/slapd-{instance_name}
++lock_dir = @localrundir@/lock/dirsrv/slapd-{instance_name}
+ log_dir = @localstatedir@/log/dirsrv/slapd-{instance_name}
+ access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access
+ audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit
+diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
+index 88a4d532a..e6271a8a9 100644
+--- a/ldap/servers/snmp/main.c
++++ b/ldap/servers/snmp/main.c
+@@ -287,14 +287,14 @@ load_config(char *conf_path)
+     }
+ 
+     /* set pidfile path */
+-    if ((pidfile = malloc(strlen(LOCALSTATEDIR) + strlen("/run/") +
++    if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
+                           strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
+-        strncpy(pidfile, LOCALSTATEDIR, strlen(LOCALSTATEDIR) + 1);
++        strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
+         /* The above will likely not be NULL terminated, but we need to
+          * be sure that we're properly NULL terminated for the below
+          * strcat() to work properly. */
+-        pidfile[strlen(LOCALSTATEDIR)] = (char)0;
+-        strcat(pidfile, "/run/");
++        pidfile[strlen(LOCALRUNDIR)] = (char)0;
++        strcat(pidfile, "/");
+         strcat(pidfile, LDAP_AGENT_PIDFILE);
+     } else {
+         printf("ldap-agent: malloc error processing config file\n");
+diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
+index e0299c5b4..2a0b83913 100644
+--- a/src/lib389/lib389/__init__.py
++++ b/src/lib389/lib389/__init__.py
+@@ -1709,6 +1709,9 @@ class DirSrv(SimpleLDAPObject, object):
+     def get_bin_dir(self):
+         return self.ds_paths.bin_dir
+ 
++    def get_run_dir(self):
++        return self.ds_paths.run_dir
++
+     def get_plugin_dir(self):
+         return self.ds_paths.plugin_dir
+ 
+diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py
+index 4e083618c..d5b95e6df 100644
+--- a/src/lib389/lib389/instance/options.py
++++ b/src/lib389/lib389/instance/options.py
+@@ -1,5 +1,5 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2019 Red Hat, Inc.
++# Copyright (C) 2021 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -32,6 +32,7 @@ format_keys = [
+     'backup_dir',
+     'db_dir',
+     'db_home_dir',
++    'ldapi',
+     'ldif_dir',
+     'lock_dir',
+     'log_dir',
+@@ -233,6 +234,10 @@ class Slapd2Base(Options2):
+         self._helptext['local_state_dir'] = "Sets the location of Directory Server variable data. Only set this parameter in a development environment."
+         self._advanced['local_state_dir'] = True
+ 
++        self._options['ldapi'] = ds_paths.ldapi
++        self._type['ldapi'] = str
++        self._helptext['ldapi'] = "Sets the location of socket interface of the Directory Server."
++
+         self._options['lib_dir'] = ds_paths.lib_dir
+         self._type['lib_dir'] = str
+         self._helptext['lib_dir'] = "Sets the location of Directory Server shared libraries. Only set this parameter in a development environment."
+diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
+index d7bb48ce0..1a35ddc07 100644
+--- a/src/lib389/lib389/instance/remove.py
++++ b/src/lib389/lib389/instance/remove.py
+@@ -78,13 +78,16 @@ def remove_ds_instance(dirsrv, force=False):
+ 
+     _log.debug("Found instance marker at %s! Proceeding to remove ..." % dse_ldif_path)
+ 
+-    # Stop the instance (if running) and now we know it really does exist
+-    # and hopefully have permission to access it ...
+-    _log.debug("Stopping instance %s" % dirsrv.serverid)
+-    dirsrv.stop()
+-
+     ### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!
+ 
++    # Remove LDAPI socket file
++    ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
++    if os.path.exists(ldapi_path):
++        try:
++            os.remove(ldapi_path)
++        except OSError as e:
++            _log.debug(f"Failed to remove LDAPI socket ({ldapi_path})  Error: {str(e)}")
++
+     # Remove these paths:
+     # for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
+     #             'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
+diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
+index ab7a2da85..57e7a9fd4 100644
+--- a/src/lib389/lib389/instance/setup.py
++++ b/src/lib389/lib389/instance/setup.py
+@@ -732,7 +732,10 @@ class SetupDs(object):
+                 dse += line.replace('%', '{', 1).replace('%', '}', 1)
+ 
+         with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
+-            ldapi_path = os.path.join(slapd['local_state_dir'], "run/slapd-%s.socket" % slapd['instance_name'])
++            if os.path.exists(os.path.dirname(slapd['ldapi'])):
++                ldapi_path = slapd['ldapi']
++            else:
++                ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
+             dse_fmt = dse.format(
+                 schema_dir=slapd['schema_dir'],
+                 lock_dir=slapd['lock_dir'],
+@@ -902,10 +905,13 @@ class SetupDs(object):
+             self.log.info("Perform SELinux labeling ...")
+             selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
+                              'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
+-                             'run_dir', 'schema_dir', 'tmp_dir')
++                             'schema_dir', 'tmp_dir')
+             for path in selinux_paths:
+                 selinux_restorecon(slapd[path])
+ 
++            # Don't run restorecon on the entire /run directory
++            selinux_restorecon(slapd['run_dir'] + '/dirsrv')
++
+             selinux_label_port(slapd['port'])
+ 
+         # Start the server
+-- 
+2.31.1
+
diff --git a/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch b/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch
deleted file mode 100644
index 91de38c..0000000
--- a/SOURCES/0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch
+++ /dev/null
@@ -1,192 +0,0 @@
-From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001
-From: Firstyear <william@blackhats.net.au>
-Date: Thu, 17 Dec 2020 08:22:23 +1000
-Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work
- (#4503)
-
-Bug Description: EntryUUID can be duplicated in replication,
-due to a missing check in assign_uuid
-
-Fix Description: Add a test case to determine how this occurs,
-and add the correct check for existing entryUUID.
-
-fixes: https://github.com/389ds/389-ds-base/issues/4498
-
-Author: William Brown <william@blackhats.net.au>
-
-Review by: @mreynolds389
----
- .../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++
- rpm.mk                                        |  2 +-
- src/plugins/entryuuid/src/lib.rs              | 20 ++++-
- src/slapi_r_plugin/src/constants.rs           |  2 +
- src/slapi_r_plugin/src/pblock.rs              |  7 ++
- 5 files changed, 106 insertions(+), 2 deletions(-)
- create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py
-
-diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
-new file mode 100644
-index 000000000..a2ebc8ff7
---- /dev/null
-+++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
-@@ -0,0 +1,77 @@
-+# --- BEGIN COPYRIGHT BLOCK ---
-+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
-+# All rights reserved.
-+#
-+# License: GPL (version 3 or any later version).
-+# See LICENSE for details.
-+# --- END COPYRIGHT BLOCK ---
-+
-+import ldap
-+import pytest
-+import logging
-+from lib389.topologies import topology_m2 as topo_m2
-+from lib389.idm.user import nsUserAccounts
-+from lib389.paths import Paths
-+from lib389.utils import ds_is_older
-+from lib389._constants import *
-+from lib389.replica import ReplicationManager
-+
-+default_paths = Paths()
-+
-+pytestmark = pytest.mark.tier1
-+
-+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
-+
-+def test_entryuuid_with_replication(topo_m2):
-+    """ Check that entryuuid works with replication
-+
-+    :id: a5f15bf9-7f63-473a-840c-b9037b787024
-+
-+    :setup: two node mmr
-+
-+    :steps:
-+        1. Create an entry on one server
-+        2. Wait for replication
-+        3. Assert it is on the second
-+
-+    :expectedresults:
-+        1. Success
-+        1. Success
-+        1. Success
-+    """
-+
-+    server_a = topo_m2.ms["supplier1"]
-+    server_b = topo_m2.ms["supplier2"]
-+    server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
-+    server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
-+
-+    repl = ReplicationManager(DEFAULT_SUFFIX)
-+
-+    account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000)
-+    euuid_a = account_a.get_attr_vals_utf8('entryUUID')
-+    print("🧩 %s" % euuid_a)
-+    assert(euuid_a is not None)
-+    assert(len(euuid_a) == 1)
-+
-+    repl.wait_for_replication(server_a, server_b)
-+
-+    account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
-+    euuid_b = account_b.get_attr_vals_utf8('entryUUID')
-+    print("🧩 %s" % euuid_b)
-+
-+    server_a.config.loglevel(vals=(ErrorLog.DEFAULT,))
-+    server_b.config.loglevel(vals=(ErrorLog.DEFAULT,))
-+
-+    assert(euuid_b is not None)
-+    assert(len(euuid_b) == 1)
-+    assert(euuid_b == euuid_a)
-+
-+    account_b.set("description", "update")
-+    repl.wait_for_replication(server_b, server_a)
-+
-+    euuid_c = account_a.get_attr_vals_utf8('entryUUID')
-+    print("🧩 %s" % euuid_c)
-+    assert(euuid_c is not None)
-+    assert(len(euuid_c) == 1)
-+    assert(euuid_c == euuid_a)
-+
-diff --git a/rpm.mk b/rpm.mk
-index 02f5bba37..d1cdff7df 100644
---- a/rpm.mk
-+++ b/rpm.mk
-@@ -25,7 +25,7 @@ TSAN_ON = 0
- # Undefined Behaviour Sanitizer
- UBSAN_ON = 0
- 
--RUST_ON = 0
-+RUST_ON = 1
- 
- # PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows.
- PERL_ON = 1
-diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
-index 92977db05..0197c5e83 100644
---- a/src/plugins/entryuuid/src/lib.rs
-+++ b/src/plugins/entryuuid/src/lib.rs
-@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma
- fn assign_uuid(e: &mut EntryRef) {
-     let sdn = e.get_sdnref();
- 
-+    // 🚧 safety barrier 🚧
-+    if e.contains_attr("entryUUID") {
-+        log_error!(
-+            ErrorLevel::Trace,
-+            "assign_uuid -> entryUUID exists, skipping dn {}",
-+            sdn.to_dn_string()
-+        );
-+        return;
-+    }
-+
-     // We could consider making these lazy static.
-     let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn");
-     let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn");
-@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid {
-     }
- 
-     fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
--        log_error!(ErrorLevel::Trace, "betxn_pre_add");
-+        if pb.get_is_replicated_operation() {
-+            log_error!(
-+                ErrorLevel::Trace,
-+                "betxn_pre_add -> replicated operation, will not change"
-+            );
-+            return Ok(());
-+        }
-+
-+        log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
- 
-         let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
-         assign_uuid(&mut e);
-diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
-index 34845c2f4..aa0691acc 100644
---- a/src/slapi_r_plugin/src/constants.rs
-+++ b/src/slapi_r_plugin/src/constants.rs
-@@ -164,6 +164,8 @@ pub(crate) enum PblockType {
-     AddEntry = 60,
-     /// SLAPI_BACKEND
-     Backend = 130,
-+    /// SLAPI_IS_REPLICATED_OPERATION
-+    IsReplicationOperation = 142,
-     /// SLAPI_PLUGIN_MR_NAMES
-     MRNames = 624,
-     /// SLAPI_PLUGIN_SYNTAX_NAMES
-diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
-index 0f83914f3..718ff2ca7 100644
---- a/src/slapi_r_plugin/src/pblock.rs
-+++ b/src/slapi_r_plugin/src/pblock.rs
-@@ -279,4 +279,11 @@ impl PblockRef {
-     pub fn get_op_result(&mut self) -> i32 {
-         self.get_value_i32(PblockType::OpResult).unwrap_or(-1)
-     }
-+
-+    pub fn get_is_replicated_operation(&mut self) -> bool {
-+        let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0);
-+        // Because rust returns the result of the last evaluation, we can
-+        // just return if not equal 0.
-+        i != 0
-+    }
- }
--- 
-2.26.3
-
diff --git a/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch b/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch
deleted file mode 100644
index 0affdf6..0000000
--- a/SOURCES/0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch
+++ /dev/null
@@ -1,626 +0,0 @@
-From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Mon, 7 Dec 2020 11:00:45 -0500
-Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in
- closed environment
-
-Description:  Add Makefile flags and update rpm.mk that allow updating
-              and downloading all the cargo/rust dependencies.  This is
-              needed for nightly tests and upstream/downstream releases.
-
-Fixes: https://github.com/389ds/389-ds-base/issues/4421
-
-Reviewed by: firstyear(Thanks!)
----
- rpm.mk                  |   3 +-
- rpm/389-ds-base.spec.in |   2 +-
- src/Cargo.lock          | 563 ----------------------------------------
- 3 files changed, 3 insertions(+), 565 deletions(-)
- delete mode 100644 src/Cargo.lock
-
-diff --git a/rpm.mk b/rpm.mk
-index d1cdff7df..ef810c63c 100644
---- a/rpm.mk
-+++ b/rpm.mk
-@@ -44,6 +44,7 @@ update-cargo-dependencies:
- 	cargo update --manifest-path=./src/Cargo.toml
- 
- download-cargo-dependencies:
-+	cargo update --manifest-path=./src/Cargo.toml
- 	cargo vendor --manifest-path=./src/Cargo.toml
- 	cargo fetch --manifest-path=./src/Cargo.toml
- 	tar -czf vendor.tar.gz vendor
-@@ -114,7 +115,7 @@ rpmbuildprep:
- 		cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
- 	fi
- 
--srpms: rpmroot srpmdistdir tarballs rpmbuildprep
-+srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep 
- 	rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
- 	cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
- 	rm -rf $(RPMBUILD)
-diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
-index b9f85489b..d80de8422 100644
---- a/rpm/389-ds-base.spec.in
-+++ b/rpm/389-ds-base.spec.in
-@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug"
- %endif
- 
- %if %{use_rust}
--RUST_FLAGS="--enable-rust"
-+RUST_FLAGS="--enable-rust --enable-rust-offline"
- %endif
- 
- %if %{use_legacy}
-diff --git a/src/Cargo.lock b/src/Cargo.lock
-deleted file mode 100644
-index 33d7b8f23..000000000
---- a/src/Cargo.lock
-+++ /dev/null
-@@ -1,563 +0,0 @@
--# This file is automatically @generated by Cargo.
--# It is not intended for manual editing.
--[[package]]
--name = "ansi_term"
--version = "0.11.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
--dependencies = [
-- "winapi",
--]
--
--[[package]]
--name = "atty"
--version = "0.2.14"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
--dependencies = [
-- "hermit-abi",
-- "libc",
-- "winapi",
--]
--
--[[package]]
--name = "autocfg"
--version = "1.0.1"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
--
--[[package]]
--name = "base64"
--version = "0.13.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
--
--[[package]]
--name = "bitflags"
--version = "1.2.1"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
--
--[[package]]
--name = "byteorder"
--version = "1.4.3"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
--
--[[package]]
--name = "cbindgen"
--version = "0.9.1"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
--dependencies = [
-- "clap",
-- "log",
-- "proc-macro2",
-- "quote",
-- "serde",
-- "serde_json",
-- "syn",
-- "tempfile",
-- "toml",
--]
--
--[[package]]
--name = "cc"
--version = "1.0.67"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
--dependencies = [
-- "jobserver",
--]
--
--[[package]]
--name = "cfg-if"
--version = "1.0.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
--
--[[package]]
--name = "clap"
--version = "2.33.3"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
--dependencies = [
-- "ansi_term",
-- "atty",
-- "bitflags",
-- "strsim",
-- "textwrap",
-- "unicode-width",
-- "vec_map",
--]
--
--[[package]]
--name = "entryuuid"
--version = "0.1.0"
--dependencies = [
-- "cc",
-- "libc",
-- "paste",
-- "slapi_r_plugin",
-- "uuid",
--]
--
--[[package]]
--name = "entryuuid_syntax"
--version = "0.1.0"
--dependencies = [
-- "cc",
-- "libc",
-- "paste",
-- "slapi_r_plugin",
-- "uuid",
--]
--
--[[package]]
--name = "fernet"
--version = "0.1.4"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
--dependencies = [
-- "base64",
-- "byteorder",
-- "getrandom",
-- "openssl",
-- "zeroize",
--]
--
--[[package]]
--name = "foreign-types"
--version = "0.3.2"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
--dependencies = [
-- "foreign-types-shared",
--]
--
--[[package]]
--name = "foreign-types-shared"
--version = "0.1.1"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
--
--[[package]]
--name = "getrandom"
--version = "0.2.3"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
--dependencies = [
-- "cfg-if",
-- "libc",
-- "wasi",
--]
--
--[[package]]
--name = "hermit-abi"
--version = "0.1.18"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
--dependencies = [
-- "libc",
--]
--
--[[package]]
--name = "itoa"
--version = "0.4.7"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
--
--[[package]]
--name = "jobserver"
--version = "0.1.22"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
--dependencies = [
-- "libc",
--]
--
--[[package]]
--name = "lazy_static"
--version = "1.4.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
--
--[[package]]
--name = "libc"
--version = "0.2.94"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
--
--[[package]]
--name = "librnsslapd"
--version = "0.1.0"
--dependencies = [
-- "cbindgen",
-- "libc",
-- "slapd",
--]
--
--[[package]]
--name = "librslapd"
--version = "0.1.0"
--dependencies = [
-- "cbindgen",
-- "libc",
-- "slapd",
--]
--
--[[package]]
--name = "log"
--version = "0.4.14"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
--dependencies = [
-- "cfg-if",
--]
--
--[[package]]
--name = "once_cell"
--version = "1.7.2"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
--
--[[package]]
--name = "openssl"
--version = "0.10.34"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
--dependencies = [
-- "bitflags",
-- "cfg-if",
-- "foreign-types",
-- "libc",
-- "once_cell",
-- "openssl-sys",
--]
--
--[[package]]
--name = "openssl-sys"
--version = "0.9.63"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
--dependencies = [
-- "autocfg",
-- "cc",
-- "libc",
-- "pkg-config",
-- "vcpkg",
--]
--
--[[package]]
--name = "paste"
--version = "0.1.18"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
--dependencies = [
-- "paste-impl",
-- "proc-macro-hack",
--]
--
--[[package]]
--name = "paste-impl"
--version = "0.1.18"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
--dependencies = [
-- "proc-macro-hack",
--]
--
--[[package]]
--name = "pkg-config"
--version = "0.3.19"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
--
--[[package]]
--name = "ppv-lite86"
--version = "0.2.10"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
--
--[[package]]
--name = "proc-macro-hack"
--version = "0.5.19"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
--
--[[package]]
--name = "proc-macro2"
--version = "1.0.27"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
--dependencies = [
-- "unicode-xid",
--]
--
--[[package]]
--name = "quote"
--version = "1.0.9"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
--dependencies = [
-- "proc-macro2",
--]
--
--[[package]]
--name = "rand"
--version = "0.8.3"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
--dependencies = [
-- "libc",
-- "rand_chacha",
-- "rand_core",
-- "rand_hc",
--]
--
--[[package]]
--name = "rand_chacha"
--version = "0.3.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
--dependencies = [
-- "ppv-lite86",
-- "rand_core",
--]
--
--[[package]]
--name = "rand_core"
--version = "0.6.2"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
--dependencies = [
-- "getrandom",
--]
--
--[[package]]
--name = "rand_hc"
--version = "0.3.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
--dependencies = [
-- "rand_core",
--]
--
--[[package]]
--name = "redox_syscall"
--version = "0.2.8"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
--dependencies = [
-- "bitflags",
--]
--
--[[package]]
--name = "remove_dir_all"
--version = "0.5.3"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
--dependencies = [
-- "winapi",
--]
--
--[[package]]
--name = "rsds"
--version = "0.1.0"
--
--[[package]]
--name = "ryu"
--version = "1.0.5"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
--
--[[package]]
--name = "serde"
--version = "1.0.126"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
--dependencies = [
-- "serde_derive",
--]
--
--[[package]]
--name = "serde_derive"
--version = "1.0.126"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
--dependencies = [
-- "proc-macro2",
-- "quote",
-- "syn",
--]
--
--[[package]]
--name = "serde_json"
--version = "1.0.64"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
--dependencies = [
-- "itoa",
-- "ryu",
-- "serde",
--]
--
--[[package]]
--name = "slapd"
--version = "0.1.0"
--dependencies = [
-- "fernet",
--]
--
--[[package]]
--name = "slapi_r_plugin"
--version = "0.1.0"
--dependencies = [
-- "lazy_static",
-- "libc",
-- "paste",
-- "uuid",
--]
--
--[[package]]
--name = "strsim"
--version = "0.8.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
--
--[[package]]
--name = "syn"
--version = "1.0.72"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
--dependencies = [
-- "proc-macro2",
-- "quote",
-- "unicode-xid",
--]
--
--[[package]]
--name = "synstructure"
--version = "0.12.4"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
--dependencies = [
-- "proc-macro2",
-- "quote",
-- "syn",
-- "unicode-xid",
--]
--
--[[package]]
--name = "tempfile"
--version = "3.2.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
--dependencies = [
-- "cfg-if",
-- "libc",
-- "rand",
-- "redox_syscall",
-- "remove_dir_all",
-- "winapi",
--]
--
--[[package]]
--name = "textwrap"
--version = "0.11.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
--dependencies = [
-- "unicode-width",
--]
--
--[[package]]
--name = "toml"
--version = "0.5.8"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
--dependencies = [
-- "serde",
--]
--
--[[package]]
--name = "unicode-width"
--version = "0.1.8"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
--
--[[package]]
--name = "unicode-xid"
--version = "0.2.2"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
--
--[[package]]
--name = "uuid"
--version = "0.8.2"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
--dependencies = [
-- "getrandom",
--]
--
--[[package]]
--name = "vcpkg"
--version = "0.2.12"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"
--
--[[package]]
--name = "vec_map"
--version = "0.8.2"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
--
--[[package]]
--name = "wasi"
--version = "0.10.2+wasi-snapshot-preview1"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
--
--[[package]]
--name = "winapi"
--version = "0.3.9"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
--dependencies = [
-- "winapi-i686-pc-windows-gnu",
-- "winapi-x86_64-pc-windows-gnu",
--]
--
--[[package]]
--name = "winapi-i686-pc-windows-gnu"
--version = "0.4.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
--
--[[package]]
--name = "winapi-x86_64-pc-windows-gnu"
--version = "0.4.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--
--[[package]]
--name = "zeroize"
--version = "1.3.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
--dependencies = [
-- "zeroize_derive",
--]
--
--[[package]]
--name = "zeroize_derive"
--version = "1.1.0"
--source = "registry+https://github.com/rust-lang/crates.io-index"
--checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
--dependencies = [
-- "proc-macro2",
-- "quote",
-- "syn",
-- "synstructure",
--]
--- 
-2.26.3
-
diff --git a/SOURCES/0006-Issue-4973-installer-changes-permissions-on-run.patch b/SOURCES/0006-Issue-4973-installer-changes-permissions-on-run.patch
new file mode 100644
index 0000000..5088fb8
--- /dev/null
+++ b/SOURCES/0006-Issue-4973-installer-changes-permissions-on-run.patch
@@ -0,0 +1,113 @@
+From b4a3b88faeafa6aa197d88ee84e4b2dbadd37ace Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Mon, 1 Nov 2021 10:42:27 -0400
+Subject: [PATCH 06/12] Issue 4973 - installer changes permissions on /run
+
+Description:  There was a regression when we switched over to using /run
+              that caused the installer to try and create /run which
+              caused the ownership to change.  Fixed this by changing
+              the "run_dir" to /run/dirsrv
+
+relates: https://github.com/389ds/389-ds-base/issues/4973
+
+Reviewed by: jchapman(Thanks!)
+---
+ ldap/admin/src/defaults.inf.in       |  2 +-
+ src/lib389/lib389/instance/remove.py | 10 +---------
+ src/lib389/lib389/instance/setup.py  | 13 +++----------
+ 3 files changed, 5 insertions(+), 20 deletions(-)
+
+diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
+index e02248b89..92b93d695 100644
+--- a/ldap/admin/src/defaults.inf.in
++++ b/ldap/admin/src/defaults.inf.in
+@@ -35,7 +35,7 @@ sysconf_dir = @sysconfdir@
+ initconfig_dir = @initconfigdir@
+ config_dir = @instconfigdir@/slapd-{instance_name}
+ local_state_dir = @localstatedir@
+-run_dir = @localrundir@
++run_dir = @localrundir@/dirsrv
+ # This is the expected location of ldapi.
+ ldapi = @localrundir@/slapd-{instance_name}.socket
+ pid_file = @localrundir@/slapd-{instance_name}.pid
+diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
+index 1a35ddc07..e96db3896 100644
+--- a/src/lib389/lib389/instance/remove.py
++++ b/src/lib389/lib389/instance/remove.py
+@@ -52,9 +52,9 @@ def remove_ds_instance(dirsrv, force=False):
+     remove_paths['ldif_dir'] = dirsrv.ds_paths.ldif_dir
+     remove_paths['lock_dir'] = dirsrv.ds_paths.lock_dir
+     remove_paths['log_dir'] = dirsrv.ds_paths.log_dir
+-    # remove_paths['run_dir'] = dirsrv.ds_paths.run_dir
+     remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir
+     remove_paths['etc_sysconfig'] = "%s/sysconfig/dirsrv-%s" % (dirsrv.ds_paths.sysconf_dir, dirsrv.serverid)
++    remove_paths['ldapi'] = dirsrv.ds_paths.ldapi
+ 
+     tmpfiles_d_path = dirsrv.ds_paths.tmpfiles_d + "/dirsrv-" + dirsrv.serverid + ".conf"
+ 
+@@ -80,14 +80,6 @@ def remove_ds_instance(dirsrv, force=False):
+ 
+     ### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!
+ 
+-    # Remove LDAPI socket file
+-    ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
+-    if os.path.exists(ldapi_path):
+-        try:
+-            os.remove(ldapi_path)
+-        except OSError as e:
+-            _log.debug(f"Failed to remove LDAPI socket ({ldapi_path})  Error: {str(e)}")
+-
+     # Remove these paths:
+     # for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
+     #             'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
+diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
+index 57e7a9fd4..be6854af8 100644
+--- a/src/lib389/lib389/instance/setup.py
++++ b/src/lib389/lib389/instance/setup.py
+@@ -732,10 +732,6 @@ class SetupDs(object):
+                 dse += line.replace('%', '{', 1).replace('%', '}', 1)
+ 
+         with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
+-            if os.path.exists(os.path.dirname(slapd['ldapi'])):
+-                ldapi_path = slapd['ldapi']
+-            else:
+-                ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
+             dse_fmt = dse.format(
+                 schema_dir=slapd['schema_dir'],
+                 lock_dir=slapd['lock_dir'],
+@@ -759,7 +755,7 @@ class SetupDs(object):
+                 db_dir=slapd['db_dir'],
+                 db_home_dir=slapd['db_home_dir'],
+                 ldapi_enabled="on",
+-                ldapi=ldapi_path,
++                ldapi=slapd['ldapi'],
+                 ldapi_autobind="on",
+             )
+             file_dse.write(dse_fmt)
+@@ -861,7 +857,7 @@ class SetupDs(object):
+             SER_ROOT_PW: self._raw_secure_password,
+             SER_DEPLOYED_DIR: slapd['prefix'],
+             SER_LDAPI_ENABLED: 'on',
+-            SER_LDAPI_SOCKET: ldapi_path,
++            SER_LDAPI_SOCKET: slapd['ldapi'],
+             SER_LDAPI_AUTOBIND: 'on'
+         }
+ 
+@@ -905,13 +901,10 @@ class SetupDs(object):
+             self.log.info("Perform SELinux labeling ...")
+             selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
+                              'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
+-                             'schema_dir', 'tmp_dir')
++                             'run_dir', 'schema_dir', 'tmp_dir')
+             for path in selinux_paths:
+                 selinux_restorecon(slapd[path])
+ 
+-            # Don't run restorecon on the entire /run directory
+-            selinux_restorecon(slapd['run_dir'] + '/dirsrv')
+-
+             selinux_label_port(slapd['port'])
+ 
+         # Start the server
+-- 
+2.31.1
+
diff --git a/SOURCES/0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch b/SOURCES/0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch
new file mode 100644
index 0000000..dc375a4
--- /dev/null
+++ b/SOURCES/0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch
@@ -0,0 +1,70 @@
+From c26c463ac92682dcf01ddbdc11cc1109b183eb0a Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Mon, 1 Nov 2021 16:04:28 -0400
+Subject: [PATCH 07/12] Issue 4973 - update snmp to use /run/dirsrv for PID
+ file
+
+Description:  Previously SNMP would write the agent PID file directly
+              under /run (or /var/run), but this broke a CI test after
+              updating lib389/defaults.inf to use /run/dirsrv.
+
+              Instead of hacking the CI test, I changed the path
+              snmp uses to:  /run/dirsrv/  Which is where it
+              should really be written anyway.
+
+relates: https://github.com/389ds/389-ds-base/issues/4973
+
+Reviewed by: vashirov(Thanks!)
+---
+ ldap/servers/snmp/main.c         | 4 ++--
+ wrappers/systemd-snmp.service.in | 6 +++---
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
+index e6271a8a9..d8eb918f6 100644
+--- a/ldap/servers/snmp/main.c
++++ b/ldap/servers/snmp/main.c
+@@ -287,14 +287,14 @@ load_config(char *conf_path)
+     }
+ 
+     /* set pidfile path */
+-    if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
++    if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/dirsrv/") +
+                           strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
+         strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
+         /* The above will likely not be NULL terminated, but we need to
+          * be sure that we're properly NULL terminated for the below
+          * strcat() to work properly. */
+         pidfile[strlen(LOCALRUNDIR)] = (char)0;
+-        strcat(pidfile, "/");
++        strcat(pidfile, "/dirsrv/");
+         strcat(pidfile, LDAP_AGENT_PIDFILE);
+     } else {
+         printf("ldap-agent: malloc error processing config file\n");
+diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
+index 477bc623d..f18766cb4 100644
+--- a/wrappers/systemd-snmp.service.in
++++ b/wrappers/systemd-snmp.service.in
+@@ -1,7 +1,7 @@
+ # do not edit this file in /lib/systemd/system - instead do the following:
+ # cp /lib/systemd/system/dirsrv-snmp.service /etc/systemd/system/
+ # edit /etc/systemd/system/dirsrv-snmp.service
+-# systemctl daemon-reload 
++# systemctl daemon-reload
+ # systemctl (re)start dirsrv-snmp.service
+ [Unit]
+ Description=@capbrand@ Directory Server SNMP Subagent.
+@@ -9,8 +9,8 @@ After=network.target
+ 
+ [Service]
+ Type=forking
+-PIDFile=/run/ldap-agent.pid
+-ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf 
++PIDFile=/run/dirsrv/ldap-agent.pid
++ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
+ 
+ [Install]
+ WantedBy=multi-user.target
+-- 
+2.31.1
+
diff --git a/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch b/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch
deleted file mode 100644
index f5edc9d..0000000
--- a/SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch
+++ /dev/null
@@ -1,412 +0,0 @@
-From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001
-From: William Brown <william@blackhats.net.au>
-Date: Fri, 26 Jun 2020 10:27:56 +1000
-Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking
-
-Bug Description: Previously pblock.c assumed that all plugin
-names were static c strings. Rust can't create static C
-strings, so these were intentionally leaked.
-
-Fix Description: Rather than leak these, we do a dup/free
-through the slapiplugin struct instead, meaning we can use
-ephemeral, and properly managed strings in rust. This does not
-affect any other existing code which will still handle the
-static strings correctly.
-
-https://pagure.io/389-ds-base/issue/51175
-
-Author: William Brown <william@blackhats.net.au>
-
-Review by: mreynolds, tbordaz (Thanks!)
----
- Makefile.am                             |  1 +
- configure.ac                            |  2 +-
- ldap/servers/slapd/pagedresults.c       |  6 +--
- ldap/servers/slapd/pblock.c             |  9 ++--
- ldap/servers/slapd/plugin.c             |  7 +++
- ldap/servers/slapd/pw_verify.c          |  1 +
- ldap/servers/slapd/tools/pwenc.c        |  2 +-
- src/slapi_r_plugin/README.md            |  6 +--
- src/slapi_r_plugin/src/charray.rs       | 32 ++++++++++++++
- src/slapi_r_plugin/src/lib.rs           |  8 ++--
- src/slapi_r_plugin/src/macros.rs        | 17 +++++---
- src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------
- 12 files changed, 85 insertions(+), 63 deletions(-)
- create mode 100644 src/slapi_r_plugin/src/charray.rs
-
-diff --git a/Makefile.am b/Makefile.am
-index 627953850..36434cf17 100644
---- a/Makefile.am
-+++ b/Makefile.am
-@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a
- libslapi_r_plugin_SOURCES = \
- 	src/slapi_r_plugin/src/backend.rs \
- 	src/slapi_r_plugin/src/ber.rs \
-+	src/slapi_r_plugin/src/charray.rs \
- 	src/slapi_r_plugin/src/constants.rs \
- 	src/slapi_r_plugin/src/dn.rs \
- 	src/slapi_r_plugin/src/entry.rs \
-diff --git a/configure.ac b/configure.ac
-index b3cf77d08..61bf35e4a 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then
-   debug_defs="-DDEBUG -DMCC_DEBUG"
-   debug_cflags="-g3 -O0 -rdynamic"
-   debug_cxxflags="-g3 -O0 -rdynamic"
--  debug_rust_defs="-C debuginfo=2"
-+  debug_rust_defs="-C debuginfo=2 -Z macro-backtrace"
-   cargo_defs=""
-   rust_target_dir="debug"
- else
-diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
-index d8b8798b6..e3444e944 100644
---- a/ldap/servers/slapd/pagedresults.c
-+++ b/ldap/servers/slapd/pagedresults.c
-@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock)
-     int i;
-     PagedResults *prp = NULL;
- 
--    slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n");
-+    /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */
- 
-     if (NULL == conn) {
--        slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n");
-+        /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */
-         return 0;
-     }
- 
-@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
-     if (needlock) {
-         pthread_mutex_unlock(&(conn->c_mutex));
-     }
--    slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc);
-+    /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */
-     return rc;
- }
- 
-diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
-index 1ad9d0399..f7d1f8885 100644
---- a/ldap/servers/slapd/pblock.c
-+++ b/ldap/servers/slapd/pblock.c
-@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
-         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
-             return (-1);
-         }
--        pblock->pb_plugin->plg_syntax_names = (char **)value;
-+        PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL);
-+        pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value);
-         break;
-     case SLAPI_PLUGIN_SYNTAX_OID:
-         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
-             return (-1);
-         }
--        pblock->pb_plugin->plg_syntax_oid = (char *)value;
-+        PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL);
-+        pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value);
-         break;
-     case SLAPI_PLUGIN_SYNTAX_FLAGS:
-         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
-@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
-         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
-             return (-1);
-         }
--        pblock->pb_plugin->plg_mr_names = (char **)value;
-+        PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL);
-+        pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value);
-         break;
-     case SLAPI_PLUGIN_MR_COMPARE:
-         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
-diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
-index 282b98738..e6b48de60 100644
---- a/ldap/servers/slapd/plugin.c
-+++ b/ldap/servers/slapd/plugin.c
-@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin)
-     if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) {
-         slapi_ch_free_string(&plugin->plg_pwdstorageschemename);
-     }
-+    if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) {
-+        slapi_ch_free_string(&plugin->plg_syntax_oid);
-+        slapi_ch_array_free(plugin->plg_syntax_names);
-+    }
-+    if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) {
-+        slapi_ch_array_free(plugin->plg_mr_names);
-+    }
-     release_componentid(plugin->plg_identity);
-     slapi_counter_destroy(&plugin->plg_op_counter);
-     if (!plugin->plg_group) {
-diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c
-index 4f0944b73..4ff1fa2fd 100644
---- a/ldap/servers/slapd/pw_verify.c
-+++ b/ldap/servers/slapd/pw_verify.c
-@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) {
-     if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) {
-         rc = SLAPI_BIND_SUCCESS;
-     }
-+    slapi_ch_free_string(&key);
- #endif
-     return rc;
- }
-diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
-index 1629c06cd..d89225e34 100644
---- a/ldap/servers/slapd/tools/pwenc.c
-+++ b/ldap/servers/slapd/tools/pwenc.c
-@@ -34,7 +34,7 @@
- 
- int ldap_syslog;
- int ldap_syslog_level;
--int slapd_ldap_debug = LDAP_DEBUG_ANY;
-+/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */
- int detached;
- FILE *error_logfp;
- FILE *access_logfp;
-diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md
-index af9743ec9..1c9bcbf17 100644
---- a/src/slapi_r_plugin/README.md
-+++ b/src/slapi_r_plugin/README.md
-@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html)
- > warning about danger.
- 
- This document will not detail the specifics of unsafe or the invariants you must adhere to for rust
--to work with C.
-+to work with C. Failure to uphold these invariants will lead to less than optimal consequences.
- 
- If you still want to see more about the plugin bindings, go on ...
- 
-@@ -135,7 +135,7 @@ associated functions.
- Now, you may notice that not all members of the trait are implemented. This is due to a feature
- of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide
- template versions of these functions. If you "overwrite" them, your implementation is used. Unlike
--OO, you may not inherit or call the default function. 
-+OO, you may not inherit or call the default function.
- 
- If a default is not provided you *must* implement that function to be considered valid. Today (20200422)
- this only applies to `start` and `close`.
-@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h
- As a result, this means that we must express in code, assertions about the proper ownership of memory
- and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible
- for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or
--*hand waving* magical failures that are eXtReMeLy FuN to debug.
-+*hand waving* magical failures that are `eXtReMeLy FuN` to debug.
- 
- ### Reference Types
- 
-diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs
-new file mode 100644
-index 000000000..d2e44693c
---- /dev/null
-+++ b/src/slapi_r_plugin/src/charray.rs
-@@ -0,0 +1,32 @@
-+use std::ffi::CString;
-+use std::iter::once;
-+use std::os::raw::c_char;
-+use std::ptr;
-+
-+pub struct Charray {
-+    pin: Vec<CString>,
-+    charray: Vec<*const c_char>,
-+}
-+
-+impl Charray {
-+    pub fn new(input: &[&str]) -> Result<Self, ()> {
-+        let pin: Result<Vec<_>, ()> = input
-+            .iter()
-+            .map(|s| CString::new(*s).map_err(|_e| ()))
-+            .collect();
-+
-+        let pin = pin?;
-+
-+        let charray: Vec<_> = pin
-+            .iter()
-+            .map(|s| s.as_ptr())
-+            .chain(once(ptr::null()))
-+            .collect();
-+
-+        Ok(Charray { pin, charray })
-+    }
-+
-+    pub fn as_ptr(&self) -> *const *const c_char {
-+        self.charray.as_ptr()
-+    }
-+}
-diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
-index 076907bae..be28cac95 100644
---- a/src/slapi_r_plugin/src/lib.rs
-+++ b/src/slapi_r_plugin/src/lib.rs
-@@ -1,9 +1,11 @@
--// extern crate lazy_static;
-+#[macro_use]
-+extern crate lazy_static;
- 
- #[macro_use]
- pub mod macros;
- pub mod backend;
- pub mod ber;
-+pub mod charray;
- mod constants;
- pub mod dn;
- pub mod entry;
-@@ -20,6 +22,7 @@ pub mod value;
- pub mod prelude {
-     pub use crate::backend::{BackendRef, BackendRefTxn};
-     pub use crate::ber::BerValRef;
-+    pub use crate::charray::Charray;
-     pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS};
-     pub use crate::dn::{Sdn, SdnRef};
-     pub use crate::entry::EntryRef;
-@@ -30,8 +33,7 @@ pub mod prelude {
-     pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
-     pub use crate::search::{Search, SearchScope};
-     pub use crate::syntax_plugin::{
--        matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr,
--        SlapiSubMr, SlapiSyntaxPlugin1,
-+        matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1,
-     };
-     pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef};
-     pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef};
-diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
-index bc8dfa60f..97fc5d7ef 100644
---- a/src/slapi_r_plugin/src/macros.rs
-+++ b/src/slapi_r_plugin/src/macros.rs
-@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks {
-         paste::item! {
-             use libc;
-             use std::convert::TryFrom;
-+            use std::ffi::CString;
- 
-             #[no_mangle]
-             pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
-@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks {
-                 };
- 
-                 // Setup the names/oids that this plugin provides syntaxes for.
--
--                let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) };
--                match pb.register_syntax_names(name_ptr) {
-+                // DS will clone these, so they can be ephemeral to this function.
-+                let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names");
-+                match pb.register_syntax_names(name_vec.as_ptr()) {
-                     0 => {},
-                     e => return e,
-                 };
- 
--                let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) };
--                match pb.register_syntax_oid(name_ptr) {
-+                let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid");
-+                match pb.register_syntax_oid(attr_oid.as_ptr()) {
-                     0 => {},
-                     e => return e,
-                 };
-@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
-                     e => return e,
-                 };
- 
--                let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) };
-+                let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names");
-+                let name_ptr = name_vec.as_ptr();
-                 // SLAPI_PLUGIN_MR_NAMES
-                 match pb.register_mr_names(name_ptr) {
-                     0 => {},
-@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
-                     e => return e,
-                 };
- 
--                let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) };
-+                let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names");
-+                let name_ptr = name_vec.as_ptr();
-                 // SLAPI_PLUGIN_MR_NAMES
-                 match pb.register_mr_names(name_ptr) {
-                     0 => {},
-diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs
-index e7d5c01bd..86f84bdd8 100644
---- a/src/slapi_r_plugin/src/syntax_plugin.rs
-+++ b/src/slapi_r_plugin/src/syntax_plugin.rs
-@@ -1,11 +1,11 @@
- use crate::ber::BerValRef;
- // use crate::constants::FilterType;
-+use crate::charray::Charray;
- use crate::error::PluginError;
- use crate::pblock::PblockRef;
- use crate::value::{ValueArray, ValueArrayRef};
- use std::cmp::Ordering;
- use std::ffi::CString;
--use std::iter::once;
- use std::os::raw::c_char;
- use std::ptr;
- 
-@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry {
-     mr_compat_syntax: *const *const c_char,
- }
- 
--pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char {
--    let n = CString::new(name)
--        .expect("An invalid string has been hardcoded!")
--        .into_boxed_c_str();
--    let n_ptr = n.as_ptr();
--    // Now we intentionally leak the name here, and the pointer will remain valid.
--    Box::leak(n);
--    n_ptr
--}
--
--pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char {
--    let n_arr: Vec<CString> = names
--        .iter()
--        .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!"))
--        .collect();
--    let n_arr = n_arr.into_boxed_slice();
--    let n_ptr_arr: Vec<*const c_char> = n_arr
--        .iter()
--        .map(|v| v.as_ptr())
--        .chain(once(ptr::null()))
--        .collect();
--    let n_ptr_arr = n_ptr_arr.into_boxed_slice();
--
--    // Now we intentionally leak these names here,
--    let _r_n_arr = Box::leak(n_arr);
--    let r_n_ptr_arr = Box::leak(n_ptr_arr);
--
--    let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char;
--    name_ptr
--}
--
- // oid - the oid of the matching rule
- // name - the name of the mr
- // desc - description
-@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register(
-     syntax: &str,
-     compat_syntax: &[&str],
- ) -> i32 {
--    let oid_ptr = name_to_leaking_char(oid);
--    let name_ptr = name_to_leaking_char(name);
--    let desc_ptr = name_to_leaking_char(desc);
--    let syntax_ptr = name_to_leaking_char(syntax);
--    let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax);
-+    // Make everything CStrings that live long enough.
-+
-+    let oid_cs = CString::new(oid).expect("invalid oid");
-+    let name_cs = CString::new(name).expect("invalid name");
-+    let desc_cs = CString::new(desc).expect("invalid desc");
-+    let syntax_cs = CString::new(syntax).expect("invalid syntax");
-+
-+    // We have to do this so the cstrings live long enough.
-+    let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax");
- 
-     let new_mr = slapi_matchingRuleEntry {
--        mr_oid: oid_ptr,
-+        mr_oid: oid_cs.as_ptr(),
-         _mr_oidalias: ptr::null(),
--        mr_name: name_ptr,
--        mr_desc: desc_ptr,
--        mr_syntax: syntax_ptr,
-+        mr_name: name_cs.as_ptr(),
-+        mr_desc: desc_cs.as_ptr(),
-+        mr_syntax: syntax_cs.as_ptr(),
-         _mr_obsolete: 0,
--        mr_compat_syntax: compat_syntax_ptr,
-+        mr_compat_syntax: compat_syntax_ca.as_ptr(),
-     };
- 
-     let new_mr_ptr = &new_mr as *const _;
--- 
-2.26.3
-
diff --git a/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch b/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch
deleted file mode 100644
index ce8b124..0000000
--- a/SOURCES/0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Fri, 21 May 2021 13:09:12 -0400
-Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin
-
-Description:  Enable the dormant interval feature in DNA plugin
-
-relates: https://github.com/389ds/389-ds-base/issues/4773
-
-Review by: mreynolds (one line commit rule)
----
- ldap/servers/plugins/dna/dna.c | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
-index bf6b74a99..928a3f54a 100644
---- a/ldap/servers/plugins/dna/dna.c
-+++ b/ldap/servers/plugins/dna/dna.c
-@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
-     /* Set the default interval to 1 */
-     entry->interval = 1;
- 
--#ifdef DNA_ENABLE_INTERVAL
-     value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
-     if (value) {
-         entry->interval = strtoull(value, 0, 0);
-@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
- 
-     slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
-                   "dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval);
--#endif
- 
-     value = slapi_entry_attr_get_charptr(e, DNA_GENERATE);
-     if (value) {
--- 
-2.26.3
-
diff --git a/SOURCES/0008-Issue-4978-make-installer-robust.patch b/SOURCES/0008-Issue-4978-make-installer-robust.patch
new file mode 100644
index 0000000..90704e4
--- /dev/null
+++ b/SOURCES/0008-Issue-4978-make-installer-robust.patch
@@ -0,0 +1,70 @@
+From 88d6ceb18e17c5a18bafb5092ae0c22241b212df Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Mon, 1 Nov 2021 14:01:11 -0400
+Subject: [PATCH 08/12] Issue 4978 - make installer robust
+
+Description:  When run in a container the server can fail to start
+              because the installer sets the db_home_dir to /dev/shm,
+              but in containers the default size of /dev/shm is too
+              small for libdb. We should detect if we are in a
+              container and not set db_home_dir to /dev/shm.
+
+              During instance removal, if an instance was not properly
+              created then it can not be removed either. Make the
+              uninstall more robust to accept some errors and continue
+              removing the instance.
+
+relates: https://github.com/389ds/389-ds-base/issues/4978
+
+Reviewed by: firstyear & tbordaz(Thanks!)
+---
+ src/lib389/lib389/instance/setup.py | 9 +++++++++
+ src/lib389/lib389/utils.py          | 5 ++++-
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
+index be6854af8..7b0147cf9 100644
+--- a/src/lib389/lib389/instance/setup.py
++++ b/src/lib389/lib389/instance/setup.py
+@@ -731,6 +731,15 @@ class SetupDs(object):
+             for line in template_dse.readlines():
+                 dse += line.replace('%', '{', 1).replace('%', '}', 1)
+ 
++        # Check if we are in a container, if so don't use /dev/shm for the db home dir
++        # as containers typically don't allocate enough space for dev/shm and we don't
++        # want to unexpectedly break the server after an upgrade
++        container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
++        if container_result.returncode == 0:
++            # In a container, set the db_home_dir to the db path
++            self.log.debug("Container detected setting db home directory to db directory.")
++            slapd['db_home_dir'] = slapd['db_dir']
++
+         with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
+             dse_fmt = dse.format(
+                 schema_dir=slapd['schema_dir'],
+diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
+index 5ba0c6676..c63b4d0ee 100644
+--- a/src/lib389/lib389/utils.py
++++ b/src/lib389/lib389/utils.py
+@@ -266,6 +266,8 @@ def selinux_label_port(port, remove_label=False):
+     :type remove_label: boolean
+     :raises: ValueError: Error message
+     """
++    if port is None:
++        return
+     try:
+         import selinux
+     except ImportError:
+@@ -662,7 +664,8 @@ def isLocalHost(host_name):
+         Uses gethostbyname()
+     """
+     # first see if this is a "well known" local hostname
+-    if host_name == 'localhost' or \
++    if host_name is None or \
++       host_name == 'localhost' or \
+        host_name == 'localhost.localdomain' or \
+        host_name == socket.gethostname():
+         return True
+-- 
+2.31.1
+
diff --git a/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch b/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch
deleted file mode 100644
index b4d22df..0000000
--- a/SOURCES/0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch
+++ /dev/null
@@ -1,926 +0,0 @@
-From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001
-From: Simon Pichugin <spichugi@redhat.com>
-Date: Thu, 20 May 2021 14:24:25 +0200
-Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762)
-
-Description: DB lock gets exhausted because of unindexed internal searches
-(under a transaction). Indexing those searches is the way to prevent exhaustion.
-If db lock get exhausted during a txn, it leads to db panic and the later recovery
-can possibly fail. That leads to a full reinit of the instance where the db locks
-got exhausted.
-
-Add three attributes to global BDB config: "nsslapd-db-locks-monitoring-enabled",
- "nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause".
-By default, nsslapd-db-locks-monitoring-enabled is turned on, nsslapd-db-locks-monitoring-threshold is set to 90% and nsslapd-db-locks-monitoring-threshold is 500ms.
-
-When current locks are close to the maximum locks value of 90% - returning
-the next candidate will fail until the maximum of locks won't be
-increased or current locks are released.
-The monitoring thread runs with the configurable interval of 500ms.
-
-Add the setting to UI and CLI tools.
-
-Fixes: https://github.com/389ds/389-ds-base/issues/4623
-
-Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!)
----
- .../suites/monitor/db_locks_monitor_test.py   | 251 ++++++++++++++++++
- ldap/servers/slapd/back-ldbm/back-ldbm.h      |  13 +-
- .../slapd/back-ldbm/db-bdb/bdb_config.c       |  99 +++++++
- .../slapd/back-ldbm/db-bdb/bdb_layer.c        |  85 ++++++
- ldap/servers/slapd/back-ldbm/init.c           |   3 +
- ldap/servers/slapd/back-ldbm/ldbm_config.c    |   3 +
- ldap/servers/slapd/back-ldbm/ldbm_config.h    |   3 +
- ldap/servers/slapd/back-ldbm/ldbm_search.c    |  13 +
- ldap/servers/slapd/libglobs.c                 |   4 +-
- src/cockpit/389-console/src/css/ds.css        |   4 +
- src/cockpit/389-console/src/database.jsx      |   7 +
- src/cockpit/389-console/src/index.html        |   2 +-
- .../src/lib/database/databaseConfig.jsx       |  88 +++++-
- src/lib389/lib389/backend.py                  |   3 +
- src/lib389/lib389/cli_conf/backend.py         |  10 +
- 15 files changed, 576 insertions(+), 12 deletions(-)
- create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
-
-diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
-new file mode 100644
-index 000000000..7f9938f30
---- /dev/null
-+++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
-@@ -0,0 +1,251 @@
-+# --- BEGIN COPYRIGHT BLOCK ---
-+# Copyright (C) 2021 Red Hat, Inc.
-+# All rights reserved.
-+#
-+# License: GPL (version 3 or any later version).
-+# See LICENSE for details.
-+# --- END COPYRIGHT BLOCK ---
-+#
-+import logging
-+import pytest
-+import datetime
-+import subprocess
-+from multiprocessing import Process, Queue
-+from lib389 import pid_from_file
-+from lib389.utils import ldap, os
-+from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
-+from lib389.cli_base import LogCapture
-+from lib389.idm.user import UserAccounts
-+from lib389.idm.organizationalunit import OrganizationalUnits
-+from lib389.tasks import AccessLog
-+from lib389.backend import Backends
-+from lib389.ldclt import Ldclt
-+from lib389.dbgen import dbgen_users
-+from lib389.tasks import ImportTask
-+from lib389.index import Indexes
-+from lib389.plugins import AttributeUniquenessPlugin
-+from lib389.config import BDB_LDBMConfig
-+from lib389.monitor import MonitorLDBM
-+from lib389.topologies import create_topology, _remove_ssca_db
-+
-+pytestmark = pytest.mark.tier2
-+db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False),
-+                                                                reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. "
-+                                                                    "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.")
-+
-+DEBUGGING = os.getenv('DEBUGGING', default=False)
-+if DEBUGGING:
-+    logging.getLogger(__name__).setLevel(logging.DEBUG)
-+else:
-+    logging.getLogger(__name__).setLevel(logging.INFO)
-+log = logging.getLogger(__name__)
-+
-+
-+def _kill_ns_slapd(inst):
-+    pid = str(pid_from_file(inst.ds_paths.pid_file))
-+    cmd = ['kill', '-9', pid]
-+    subprocess.Popen(cmd, stdout=subprocess.PIPE)
-+
-+
-+@pytest.fixture(scope="function")
-+def topology_st_fn(request):
-+    """Create DS standalone instance for each test case"""
-+
-+    topology = create_topology({ReplicaRole.STANDALONE: 1})
-+
-+    def fin():
-+        # Kill the hanging process at the end of test to prevent failures in the following tests
-+        if DEBUGGING:
-+            [_kill_ns_slapd(inst) for inst in topology]
-+        else:
-+            [_kill_ns_slapd(inst) for inst in topology]
-+            assert _remove_ssca_db(topology)
-+            [inst.stop() for inst in topology if inst.exists()]
-+            [inst.delete() for inst in topology if inst.exists()]
-+    request.addfinalizer(fin)
-+
-+    topology.logcap = LogCapture()
-+    return topology
-+
-+
-+@pytest.fixture(scope="function")
-+def setup_attruniq_index_be_import(topology_st_fn):
-+    """Enable Attribute Uniqueness, disable indexes and
-+    import 120000 entries to the default backend
-+    """
-+    inst = topology_st_fn.standalone
-+
-+    inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
-+    inst.config.set('nsslapd-plugin-logging', 'on')
-+    inst.restart()
-+
-+    attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
-+    attruniq.create(properties={'cn': 'attruniq'})
-+    for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
-+        attruniq.add_unique_attribute(cn)
-+    attruniq.add_unique_subtree(DEFAULT_SUFFIX)
-+    attruniq.enable_all_subtrees()
-+    attruniq.enable()
-+
-+    indexes = Indexes(inst)
-+    for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
-+        indexes.ensure_state(properties={
-+            'cn': cn,
-+            'nsSystemIndex': 'false',
-+            'nsIndexType': 'none'})
-+
-+    bdb_config = BDB_LDBMConfig(inst)
-+    bdb_config.replace("nsslapd-db-locks", "130000")
-+    inst.restart()
-+
-+    ldif_dir = inst.get_ldif_dir()
-+    import_ldif = ldif_dir + '/perf_import.ldif'
-+
-+    # Valid online import
-+    import_task = ImportTask(inst)
-+    dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
-+    import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
-+    import_task.wait()
-+    assert import_task.is_complete()
-+
-+
-+def create_user_wrapper(q, users):
-+    try:
-+        users.create_test_user()
-+    except Exception as ex:
-+        q.put(ex)
-+
-+
-+def spawn_worker_thread(function, users, log, timeout, info):
-+    log.info(f"Starting the thread - {info}")
-+    q = Queue()
-+    p = Process(target=function, args=(q,users,))
-+    p.start()
-+
-+    log.info(f"Waiting for {timeout} seconds for the thread to finish")
-+    p.join(timeout)
-+
-+    if p.is_alive():
-+        log.info("Killing the thread as it's still running")
-+        p.terminate()
-+        p.join()
-+        raise RuntimeError(f"Function call was aborted: {info}")
-+    result = q.get()
-+    if isinstance(result, Exception):
-+        raise result
-+    else:
-+        return result
-+
-+
-+@db_locks_monitoring_ack 
-+@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")])
-+def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold):
-+    """Test that when all of the locks are exhausted the instance still working
-+    and database is not corrupted
-+
-+    :id: 299108cc-04d8-4ddc-b58e-99157fccd643
-+    :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
-+    :steps: 1. Set nsslapd-db-locks to 11000
-+            2. Check that we stop acquiring new locks when the threshold is reached
-+            3. Check that we can regulate a pause interval for DB locks monitoring thread
-+            4. Make sure the feature works for different backends on the same suffix
-+    :expectedresults:
-+            1. Success
-+            2. Success
-+            3. Success
-+            4. Success
-+    """
-+
-+    inst = topology_st_fn.standalone
-+    ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com'
-+
-+    backends = Backends(inst)
-+    backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX,
-+                                'name': ADDITIONAL_SUFFIX[-3:]})
-+    ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
-+    ous.create(properties={'ou': 'newpeople'})
-+
-+    bdb_config = BDB_LDBMConfig(inst)
-+    bdb_config.replace("nsslapd-db-locks", "11000")
-+
-+    # Restart server
-+    inst.restart()
-+
-+    for lock_enabled in ["on", "off"]:
-+        for lock_pause in ["100", "500", "1000"]:
-+            bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled)
-+            bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold)
-+            bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
-+            inst.restart()
-+
-+            if lock_enabled == "off":
-+                raised_exception = (RuntimeError, ldap.SERVER_DOWN)
-+            else:
-+                raised_exception = ldap.OPERATIONS_ERROR
-+
-+            users = UserAccounts(inst, DEFAULT_SUFFIX)
-+            with pytest.raises(raised_exception):
-+                spawn_worker_thread(create_user_wrapper, users, log, 30,
-+                                    f"Adding user with monitoring enabled='{lock_enabled}'; "
-+                                    f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
-+            # Restart because we already run out of locks and the next unindexed searches will fail eventually
-+            if lock_enabled == "off":
-+                _kill_ns_slapd(inst)
-+                inst.restart()
-+
-+            users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None)
-+            with pytest.raises(raised_exception):
-+                spawn_worker_thread(create_user_wrapper, users, log, 30,
-+                                    f"Adding user with monitoring enabled='{lock_enabled}'; "
-+                                    f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
-+            # In case feature is disabled - restart for the clean up
-+            if lock_enabled == "off":
-+                _kill_ns_slapd(inst)
-+            inst.restart()
-+
-+
-+@db_locks_monitoring_ack
-+def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import):
-+    """Test that DB lock pause setting increases the wait interval value for the monitoring thread
-+
-+    :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6
-+    :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
-+    :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%)
-+            2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds)
-+            3. Make sure that the pause is successfully increased a few times in a row
-+    :expectedresults:
-+            1. Success
-+            2. Success
-+            3. Success
-+    """
-+
-+    inst = topology_st_fn.standalone
-+
-+    bdb_config = BDB_LDBMConfig(inst)
-+    bdb_config.replace("nsslapd-db-locks", "20000")
-+    lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause")
-+    assert lock_pause == 500
-+    lock_pause = "10000"
-+    bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
-+
-+    # Restart server
-+    inst.restart()
-+
-+    lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled")
-+    lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold")
-+    assert lock_enabled == "on"
-+    assert lock_threshold == 90
-+
-+    users = UserAccounts(inst, DEFAULT_SUFFIX)
-+    start = datetime.datetime.now()
-+    with pytest.raises(ldap.OPERATIONS_ERROR):
-+        spawn_worker_thread(create_user_wrapper, users, log, 30,
-+                            f"Adding user with monitoring enabled='{lock_enabled}'; "
-+                            f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'")
-+    end = datetime.datetime.now()
-+    time_delta = end - start
-+    if time_delta.seconds < 9:
-+        raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. "
-+                            f"Finished the execution in {time_delta.seconds} seconds")
-+    # In case something has failed - restart for the clean up
-+    inst.restart()
-diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
-index 571b0a58b..afb831c32 100644
---- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
-+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
-@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t;
- #define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */
- #define DEFAULT_DBCACHE_SIZE     33554432
- #define DEFAULT_DBCACHE_SIZE_STR "33554432"
-+#define DEFAULT_DBLOCK_PAUSE     500
-+#define DEFAULT_DBLOCK_PAUSE_STR "500"
- #define DEFAULT_MODE             0600
- #define DEFAULT_ALLIDSTHRESHOLD  4000
- #define DEFAULT_IDL_TUNE         1
-@@ -575,12 +577,21 @@ struct ldbminfo
-     char *li_backend_implement;          /* low layer backend implementation */
-     int li_noparentcheck;                /* check if parent exists on add */
- 
--    /* the next 3 fields are for the params that don't get changed until
-+    /* db lock monitoring */
-+    /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */
-+    int32_t li_dblock_monitoring;          /* enables db locks monitoring thread - requires restart  */
-+    uint32_t li_dblock_monitoring_pause;   /* an interval for db locks monitoring thread */
-+    uint32_t li_dblock_threshold;          /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/
-+    uint32_t li_dblock_threshold_reached;
-+
-+    /* the next 4 fields are for the params that don't get changed until
-      * the server is restarted (used by the admin console)
-      */
-     char *li_new_directory;
-     uint64_t li_new_dbcachesize;
-     int li_new_dblock;
-+    int32_t li_new_dblock_monitoring;
-+    uint64_t li_new_dblock_threshold;
- 
-     int li_new_dbncache;
- 
-diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
-index 738b841aa..167644943 100644
---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
-+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
-@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap
-     return retval;
- }
- 
-+static void *
-+bdb_config_db_lock_monitoring_get(void *arg)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+
-+    return (void *)((intptr_t)(li->li_new_dblock_monitoring));
-+}
-+
-+static int
-+bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+    int retval = LDAP_SUCCESS;
-+    int val = (int32_t)((intptr_t)value);
-+
-+    if (apply) {
-+        if (CONFIG_PHASE_RUNNING == phase) {
-+            li->li_new_dblock_monitoring = val;
-+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set",
-+                          "New nsslapd-db-lock-monitoring value will not take affect until the server is restarted\n");
-+        } else {
-+            li->li_new_dblock_monitoring = val;
-+            li->li_dblock_monitoring = val;
-+        }
-+    }
-+
-+    return retval;
-+}
-+
-+static void *
-+bdb_config_db_lock_pause_get(void *arg)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+
-+    return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)));
-+}
-+
-+static int
-+bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+    int retval = LDAP_SUCCESS;
-+    u_int32_t val = (u_int32_t)((uintptr_t)value);
-+
-+    if (val == 0) {
-+        slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set",
-+                      "%s was set to '0'. The default value will be used (%s)",
-+                      CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR);
-+        val = DEFAULT_DBLOCK_PAUSE;
-+    }
-+
-+    if (apply) {
-+        slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED);
-+    }
-+    return retval;
-+}
-+
-+static void *
-+bdb_config_db_lock_threshold_get(void *arg)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+
-+    return (void *)((uintptr_t)(li->li_new_dblock_threshold));
-+}
-+
-+static int
-+bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+    int retval = LDAP_SUCCESS;
-+    u_int32_t val = (u_int32_t)((uintptr_t)value);
-+
-+    if (val < 70 || val > 95) {
-+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
-+                              "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
-+                              CONFIG_DB_LOCKS_THRESHOLD, val);
-+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set",
-+                      "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
-+                      CONFIG_DB_LOCKS_THRESHOLD, val);
-+        retval = LDAP_OPERATIONS_ERROR;
-+        return retval;
-+    }
-+
-+    if (apply) {
-+        if (CONFIG_PHASE_RUNNING == phase) {
-+            li->li_new_dblock_threshold = val;
-+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set",
-+                          "New nsslapd-db-lock-monitoring-threshold value will not take affect until the server is restarted\n");
-+        } else {
-+            li->li_new_dblock_threshold = val;
-+            li->li_dblock_threshold = val;
-+        }
-+    }
-+    return retval;
-+}
-+
- static void *
- bdb_config_dbcachesize_get(void *arg)
- {
-@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = {
-     {CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-     {CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0},
-     {CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-+    {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-+    {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-+    {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-     {NULL, 0, NULL, NULL, NULL, 0}};
- 
- void
-diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
-index 6cccad8e6..2f25f67a2 100644
---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
-+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
-@@ -35,6 +35,8 @@
-     (env)->txn_checkpoint((env), (kbyte), (min), (flags))
- #define MEMP_STAT(env, gsp, fsp, flags, malloc) \
-     (env)->memp_stat((env), (gsp), (fsp), (flags))
-+#define LOCK_STAT(env, statp, flags, malloc) \
-+    (env)->lock_stat((env), (statp), (flags))
- #define MEMP_TRICKLE(env, pct, nwrotep) \
-     (env)->memp_trickle((env), (pct), (nwrotep))
- #define LOG_ARCHIVE(env, listp, flags, malloc) \
-@@ -66,6 +68,7 @@
- #define NEWDIR_MODE 0755
- #define DB_REGION_PREFIX "__db."
- 
-+static int locks_monitoring_threadmain(void *param);
- static int perf_threadmain(void *param);
- static int checkpoint_threadmain(void *param);
- static int trickle_threadmain(void *param);
-@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li);
- static int bdb_start_trickle_thread(struct ldbminfo *li);
- static int bdb_start_perf_thread(struct ldbminfo *li);
- static int bdb_start_txn_test_thread(struct ldbminfo *li);
-+static int bdb_start_locks_monitoring_thread(struct ldbminfo *li);
- static int trans_batch_count = 0;
- static int trans_batch_limit = 0;
- static int trans_batch_txn_min_sleep = 50; /* ms */
-@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode)
-                 return return_value;
-             }
- 
-+            if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) {
-+                return return_value;
-+            }
-+
-             /* We need to free the memory to avoid a leak
-              * Also, we have to evaluate if the performance counter
-              * should be preserved or not for database restore.
-@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li)
-     return return_value;
- }
- 
-+
- /* Performance thread */
- static int
- perf_threadmain(void *param)
-@@ -2910,6 +2919,82 @@ perf_threadmain(void *param)
-     return 0;
- }
- 
-+
-+/*
-+ * create a thread for locks_monitoring_threadmain
-+ */
-+static int
-+bdb_start_locks_monitoring_thread(struct ldbminfo *li)
-+{
-+    int return_value = 0;
-+    if (li->li_dblock_monitoring) {
-+        if (NULL == PR_CreateThread(PR_USER_THREAD,
-+                                    (VFP)(void *)locks_monitoring_threadmain, li,
-+                                    PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
-+                                    PR_UNJOINABLE_THREAD,
-+                                    SLAPD_DEFAULT_THREAD_STACKSIZE)) {
-+            PRErrorCode prerr = PR_GetError();
-+            slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread",
-+                        "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
-+                        prerr, slapd_pr_strerror(prerr));
-+            return_value = -1;
-+        }
-+    }
-+    return return_value;
-+}
-+
-+
-+/* DB Locks Monitoring thread */
-+static int
-+locks_monitoring_threadmain(void *param)
-+{
-+    int ret = 0;
-+    uint64_t current_locks = 0;
-+    uint64_t max_locks = 0;
-+    uint32_t lock_exhaustion = 0;
-+    PRIntervalTime interval;
-+    struct ldbminfo *li = NULL;
-+
-+    PR_ASSERT(NULL != param);
-+    li = (struct ldbminfo *)param;
-+
-+    dblayer_private *priv = li->li_dblayer_private;
-+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
-+    PR_ASSERT(NULL != priv);
-+
-+    INCR_THREAD_COUNT(pEnv);
-+
-+    while (!BDB_CONFIG(li)->bdb_stop_threads) {
-+        if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) {
-+            DB_LOCK_STAT *lockstat = NULL;
-+            ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc);
-+            if (0 == ret) {
-+                current_locks = lockstat->st_nlocks;
-+                max_locks = lockstat->st_maxlocks;
-+                if (max_locks){
-+                    lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0);
-+                } else {
-+                    lock_exhaustion = 0;
-+                }
-+                if ((li->li_dblock_threshold) &&
-+                    (lock_exhaustion >= li->li_dblock_threshold)) {
-+                    slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED);
-+                } else {
-+                    slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED);
-+                }
-+            }
-+            slapi_ch_free((void **)&lockstat);
-+        }
-+        interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED));
-+        DS_Sleep(interval);
-+    }
-+
-+    DECR_THREAD_COUNT(pEnv);
-+    slapi_log_err(SLAPI_LOG_TRACE, "locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n");
-+    return 0;
-+}
-+
-+
- /*
-  * create a thread for deadlock_threadmain
-  */
-diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
-index 893776699..4165c8fad 100644
---- a/ldap/servers/slapd/back-ldbm/init.c
-+++ b/ldap/servers/slapd/back-ldbm/init.c
-@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb)
-     /* Initialize the set of instances. */
-     li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor);
- 
-+    /* Init lock threshold value */
-+    li->li_dblock_threshold_reached = 0;
-+
-     /* ask the factory to give us space in the Connection object
-          * (only bulk import uses this)
-          */
-diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
-index 10cef250f..60884cf33 100644
---- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
-+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
-@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] =
-         CONFIG_SERIAL_LOCK,
-         CONFIG_USE_LEGACY_ERRORCODE,
-         CONFIG_DB_DEADLOCK_POLICY,
-+        CONFIG_DB_LOCKS_MONITORING,
-+        CONFIG_DB_LOCKS_THRESHOLD,
-+        CONFIG_DB_LOCKS_PAUSE,
-         ""};
- 
- /* Used to add an array of entries, like the one above and
-diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
-index 58e64799c..6fa8292eb 100644
---- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
-+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
-@@ -104,6 +104,9 @@ struct config_info
- #define CONFIG_DB_VERBOSE "nsslapd-db-verbose"
- #define CONFIG_DB_DEBUG "nsslapd-db-debug"
- #define CONFIG_DB_LOCK "nsslapd-db-locks"
-+#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled"
-+#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold"
-+#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause"
- #define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions"
- #define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem"
- #define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem"
-diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
-index 1a7b510d4..6e22debde 100644
---- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
-+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
-@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
-     slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
-     slapi_pblock_get(pb, SLAPI_OPERATION, &op);
- 
-+
-     if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) {
-         /*
-          * Start at the end of the list and work our way forward.  Since a single
-@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
- 
-     /* Find the next candidate entry and return it. */
-     while (1) {
-+        if (li->li_dblock_monitoring &&
-+            slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) {
-+            slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry",
-+                          "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold "
-+                          "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). "
-+                          "Please, increase nsslapd-db-locks according to your needs.\n");
-+            slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL);
-+            delete_search_result_set(pb, &sr);
-+            rc = SLAPI_FAIL_GENERAL;
-+            slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL);
-+            goto bail;
-+        }
- 
-         /* check for abandon */
-         if (slapi_op_abandoned(pb) || (NULL == sr)) {
-diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
-index 388616b36..db7d01bbc 100644
---- a/ldap/servers/slapd/libglobs.c
-+++ b/ldap/servers/slapd/libglobs.c
-@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply)
- #if 0
-         debugHashTable(attr);
- #endif
--        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr);
--        slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr);
-+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr);
-+        slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr);
-         return LDAP_NO_SUCH_ATTRIBUTE;
-     }
- 
-diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
-index 9248116e7..3cf50b593 100644
---- a/src/cockpit/389-console/src/css/ds.css
-+++ b/src/cockpit/389-console/src/css/ds.css
-@@ -639,6 +639,10 @@ option {
-     padding-right: 0 !important;
- }
- 
-+.ds-vertical-scroll-auto {
-+  overflow-y: auto !important;
-+}
-+
- .alert {
-     max-width: 750px;
- }
-diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
-index efa3ce6d5..11cae972c 100644
---- a/src/cockpit/389-console/src/database.jsx
-+++ b/src/cockpit/389-console/src/database.jsx
-@@ -157,6 +157,7 @@ export class Database extends React.Component {
-                     const attrs = config.attrs;
-                     let db_cache_auto = false;
-                     let import_cache_auto = false;
-+                    let dblocksMonitoring = false;
-                     let dbhome = "";
- 
-                     if ('nsslapd-db-home-directory' in attrs) {
-@@ -168,6 +169,9 @@ export class Database extends React.Component {
-                     if (attrs['nsslapd-import-cache-autosize'] != "0") {
-                         import_cache_auto = true;
-                     }
-+                    if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") {
-+                        dblocksMonitoring = true;
-+                    }
- 
-                     this.setState(() => (
-                         {
-@@ -187,6 +191,9 @@ export class Database extends React.Component {
-                                     txnlogdir: attrs['nsslapd-db-logdirectory'],
-                                     dbhomedir: dbhome,
-                                     dblocks: attrs['nsslapd-db-locks'],
-+                                    dblocksMonitoring: dblocksMonitoring,
-+                                    dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'],
-+                                    dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'],
-                                     chxpoint: attrs['nsslapd-db-checkpoint-interval'],
-                                     compactinterval: attrs['nsslapd-db-compactdb-interval'],
-                                     importcacheauto: attrs['nsslapd-import-cache-autosize'],
-diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html
-index 1278844fc..fd0eeb669 100644
---- a/src/cockpit/389-console/src/index.html
-+++ b/src/cockpit/389-console/src/index.html
-@@ -12,7 +12,7 @@
- </head>
- 
- 
--<body>
-+<body class="ds-vertical-scroll-auto">
-     <div id="dsinstance"></div>
-     <script src="index.js"></script>
- </body>
-diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
-index f6e662bca..6a71c138d 100644
---- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
-+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
-@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component {
-             txnlogdir: this.props.data.txnlogdir,
-             dbhomedir: this.props.data.dbhomedir,
-             dblocks: this.props.data.dblocks,
-+            dblocksMonitoring: this.props.data.dblocksMonitoring,
-+            dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
-+            dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
-             chxpoint: this.props.data.chxpoint,
-             compactinterval: this.props.data.compactinterval,
-             importcachesize: this.props.data.importcachesize,
-@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component {
-             _txnlogdir: this.props.data.txnlogdir,
-             _dbhomedir: this.props.data.dbhomedir,
-             _dblocks: this.props.data.dblocks,
-+            _dblocksMonitoring: this.props.data.dblocksMonitoring,
-+            _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
-+            _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
-             _chxpoint: this.props.data.chxpoint,
-             _compactinterval: this.props.data.compactinterval,
-             _importcachesize: this.props.data.importcachesize,
-@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component {
-             _import_cache_auto: this.props.data.import_cache_auto,
-         };
-         this.handleChange = this.handleChange.bind(this);
-+        this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this);
-         this.select_auto_cache = this.select_auto_cache.bind(this);
-         this.select_auto_import_cache = this.select_auto_import_cache.bind(this);
-         this.save_db_config = this.save_db_config.bind(this);
-@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component {
-         }, this.handleChange(e));
-     }
- 
-+    select_db_locks_monitoring (val, e) {
-+        this.setState({
-+            dblocksMonitoring: !this.state.dblocksMonitoring
-+        }, this.handleChange(val, e));
-+    }
-+
-     handleChange(e) {
-         // Generic
-         const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value;
-@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component {
-             cmd.push("--locks=" + this.state.dblocks);
-             requireRestart = true;
-         }
-+        if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) {
-+            if (this.state.dblocksMonitoring) {
-+                cmd.push("--locks-monitoring-enabled=on");
-+            } else {
-+                cmd.push("--locks-monitoring-enabled=off");
-+            }
-+            requireRestart = true;
-+        }
-+        if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) {
-+            cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold);
-+            requireRestart = true;
-+        }
-+        if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) {
-+            cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause);
-+        }
-         if (this.state._chxpoint != this.state.chxpoint) {
-             cmd.push("--checkpoint-interval=" + this.state.chxpoint);
-             requireRestart = true;
-@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component {
-         let import_cache_form;
-         let db_auto_checked = false;
-         let import_auto_checked = false;
-+        let dblocksMonitor = "";
-+
-+        if (this.state.dblocksMonitoring) {
-+            dblocksMonitor = <div className="ds-margin-top">
-+                <Row className="ds-margin-top" title="Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are acquired, the server will abort the searches while the number of locks are not decreased. It helps to avoid DB corruption and long recovery. (nsslapd-db-locks-monitoring-threshold)">
-+                    <Col componentClass={ControlLabel} sm={4}>
-+                        DB Locks Threshold Percentage
-+                    </Col>
-+                    <Col sm={8}>
-+                        <input className="ds-input" type="number" id="dblocksMonitoringThreshold" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringThreshold} />
-+                    </Col>
-+                </Row>
-+                <Row className="ds-margin-top" title="Sets the amount of time (milliseconds) that the monitoring thread spends waiting between checks. (nsslapd-db-locks-monitoring-pause)">
-+                    <Col componentClass={ControlLabel} sm={4}>
-+                        DB Locks Pause Milliseconds
-+                    </Col>
-+                    <Col sm={8}>
-+                        <input className="ds-input" type="number" id="dblocksMonitoringPause" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringPause} />
-+                    </Col>
-+                </Row>
-+            </div>;
-+        }
- 
-         if (this.state.db_cache_auto) {
-             db_cache_form = <div id="auto-cache-form" className="ds-margin-left">
-@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component {
-                                             <input id="dbhomedir" value={this.state.dbhomedir} onChange={this.handleChange} className="ds-input-auto" type="text" />
-                                         </Col>
-                                     </Row>
--                                    <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
--                                        <Col componentClass={ControlLabel} sm={4}>
--                                            Database Locks
--                                        </Col>
--                                        <Col sm={8}>
--                                            <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
--                                        </Col>
--                                    </Row>
-                                     <Row className="ds-margin-top" title="Amount of time in seconds after which the Directory Server sends a checkpoint entry to the database transaction log (nsslapd-db-checkpoint-interval).">
-                                         <Col componentClass={ControlLabel} sm={4}>
-                                             Database Checkpoint Interval
-@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component {
-                                             <input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="text" />
-                                         </Col>
-                                     </Row>
-+                                    <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
-+                                        <Col componentClass={ControlLabel} sm={4}>
-+                                            Database Locks
-+                                        </Col>
-+                                        <Col sm={8}>
-+                                            <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
-+                                        </Col>
-+                                    </Row>
-+                                    <Row>
-+                                        <Col sm={12}>
-+                                            <h5 className="ds-sub-header">DB Locks Monitoring</h5>
-+                                            <hr />
-+                                        </Col>
-+                                    </Row>
-+                                    <Row>
-+                                        <Col sm={12}>
-+                                            <Checkbox title="Set input to be set automatically"
-+                                                id="dblocksMonitoring"
-+                                                checked={this.state.dblocksMonitoring}
-+                                                onChange={this.select_db_locks_monitoring}
-+                                            >
-+                                                Enable Monitoring
-+                                            </Checkbox>
-+                                        </Col>
-+                                    </Row>
-+                                    <Row>
-+                                        <Col sm={12}>
-+                                            {dblocksMonitor}
-+                                        </Col>
-+                                    </Row>
-                                 </Form>
-                             </div>
-                         </div>
-diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
-index bcd7b383f..13bb27842 100644
---- a/src/lib389/lib389/backend.py
-+++ b/src/lib389/lib389/backend.py
-@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject):
-                     'nsslapd-db-transaction-batch-max-wait',
-                     'nsslapd-db-logbuf-size',
-                     'nsslapd-db-locks',
-+                    'nsslapd-db-locks-monitoring-enabled',
-+                    'nsslapd-db-locks-monitoring-threshold',
-+                    'nsslapd-db-locks-monitoring-pause',
-                     'nsslapd-db-private-import-mem',
-                     'nsslapd-import-cache-autosize',
-                     'nsslapd-cache-autosize',
-diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
-index 6bfbcb036..722764d10 100644
---- a/src/lib389/lib389/cli_conf/backend.py
-+++ b/src/lib389/lib389/cli_conf/backend.py
-@@ -46,6 +46,9 @@ arg_to_attr = {
-         'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait',
-         'logbufsize': 'nsslapd-db-logbuf-size',
-         'locks': 'nsslapd-db-locks',
-+        'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled',
-+        'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold',
-+        'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause',
-         'import_cache_autosize': 'nsslapd-import-cache-autosize',
-         'cache_autosize': 'nsslapd-cache-autosize',
-         'cache_autosize_split': 'nsslapd-cache-autosize-split',
-@@ -998,6 +1001,13 @@ def create_parser(subparsers):
-                                                               'the batch count (only works when txn-batch-val is set)')
-     set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size')
-     set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks')
-+    set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to monitor DB locks. When it crosses the percentage value '
-+                                                                         'set with "--locks-monitoring-threshold" ("on" by default)')
-+    set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are '
-+                                                                           'acquired, the server will abort the searches while the number of locks '
-+                                                                           'are not decreased. It helps to avoid DB corruption and long recovery.')
-+    set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the DB lock monitoring value in milliseconds for the amount of time '
-+                                                                       'that the monitoring thread spends waiting between checks.')
-     set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import '
-                                                                        'cache to be used during the the import process of LDIF files')
-     set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database '
--- 
-2.26.3
-
diff --git a/SOURCES/0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch b/SOURCES/0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
new file mode 100644
index 0000000..e895ba1
--- /dev/null
+++ b/SOURCES/0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
@@ -0,0 +1,468 @@
+From 2ae2f53756b6f13e2816bb30812740cb7ad97403 Mon Sep 17 00:00:00 2001
+From: tbordaz <tbordaz@redhat.com>
+Date: Fri, 5 Nov 2021 09:56:43 +0100
+Subject: [PATCH 09/12] Issue 4972 - gecos with IA5 introduces a compatibility
+ issue with previous (#4981)
+
+releases where it was DirectoryString
+
+Bug description:
+       For years 'gecos' was DirectoryString (UTF8), with #50933 it was restricted to IA5 (ascii)
+       https://github.com/389ds/389-ds-base/commit/0683bcde1b667b6d0ca6e8d1ef605f17c51ea2f7#
+
+       IA5 definition conforms rfc2307 but is a problem for existing deployments
+       where entries can have 'gecos' attribute value with UTF8.
+
+Fix description:
+       Revert the definition to of 'gecos' being Directory String
+
+       Additional fix to make test_replica_backup_and_restore more
+       robust to CI
+
+relates: https://github.com/389ds/389-ds-base/issues/4972
+
+Reviewed by: William Brown, Pierre Rogier, James Chapman (Thanks !)
+
+Platforms tested: F34
+---
+ .../tests/suites/schema/schema_test.py        | 398 +++++++++++++++++-
+ ldap/schema/10rfc2307compat.ldif              |   6 +-
+ 2 files changed, 400 insertions(+), 4 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py
+index d590624b6..5d62b8d59 100644
+--- a/dirsrvtests/tests/suites/schema/schema_test.py
++++ b/dirsrvtests/tests/suites/schema/schema_test.py
+@@ -18,8 +18,12 @@ import pytest
+ import six
+ from ldap.cidict import cidict
+ from ldap.schema import SubSchema
++from lib389.schema import SchemaLegacy
+ from lib389._constants import *
+-from lib389.topologies import topology_st
++from lib389.topologies import topology_st, topology_m2 as topo_m2
++from lib389.idm.user import UserAccounts, UserAccount
++from lib389.replica import ReplicationManager
++from lib389.utils import ensure_bytes
+ 
+ pytestmark = pytest.mark.tier1
+ 
+@@ -165,6 +169,398 @@ def test_schema_comparewithfiles(topology_st):
+ 
+     log.info('test_schema_comparewithfiles: PASSED')
+ 
++def test_gecos_directoryString(topology_st):
++    """Check that gecos supports directoryString value
++
++    :id: aee422bb-6299-4124-b5cd-d7393dac19d3
++
++    :setup: Standalone instance
++
++    :steps:
++        1. Add a common user
++        2. replace gecos with a direstoryString value
++
++    :expectedresults:
++        1. Success
++        2. Success
++    """
++
++    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
++
++    user_properties = {
++        'uid': 'testuser',
++        'cn' : 'testuser',
++        'sn' : 'user',
++        'uidNumber' : '1000',
++        'gidNumber' : '2000',
++        'homeDirectory' : '/home/testuser',
++    }
++    testuser = users.create(properties=user_properties)
++
++    # Add a gecos UTF value
++    testuser.replace('gecos', 'Hélène')
++
++def test_gecos_mixed_definition_topo(topo_m2, request):
++    """Check that replication is still working if schema contains
++       definitions that does not conform with a replicated entry
++
++    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
++    :setup: Two suppliers replication setup
++    :steps:
++        1. Create a testuser on M1
++        2  Stop M1 and M2
++        3  Change gecos def on M2 to be IA5
++        4  Update testuser with gecos directoryString value
++        5  Check replication is still working
++    :expectedresults:
++        1. success
++        2. success
++        3. success
++        4. success
++        5. success
++
++    """
++
++    repl = ReplicationManager(DEFAULT_SUFFIX)
++    m1 = topo_m2.ms["supplier1"]
++    m2 = topo_m2.ms["supplier2"]
++    
++
++    # create a test user
++    testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
++    testuser = UserAccount(m1, testuser_dn)
++    try:
++        testuser.create(properties={
++            'uid': 'testuser',
++            'cn': 'testuser',
++            'sn': 'testuser',
++            'uidNumber' : '1000',
++            'gidNumber' : '2000',
++            'homeDirectory' : '/home/testuser',
++        })
++    except ldap.ALREADY_EXISTS:
++        pass
++    repl.wait_for_replication(m1, m2)
++
++    # Stop suppliers to update the schema
++    m1.stop()
++    m2.stop()
++
++    # on M1: gecos is DirectoryString (default)
++    # on M2: gecos is IA5
++    schema_filename = (m2.schemadir + "/99user.ldif")
++    try:
++        with open(schema_filename, 'w') as schema_file:
++            schema_file.write("dn: cn=schema\n")
++            schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
++                              "'gecos' DESC 'The GECOS field; the common name' " +
++                              "EQUALITY caseIgnoreIA5Match " +
++                              "SUBSTR caseIgnoreIA5SubstringsMatch " +
++                              "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
++                              "SINGLE-VALUE )\n")
++        os.chmod(schema_filename, 0o777)
++    except OSError as e:
++        log.fatal("Failed to update schema file: " +
++                  "{} Error: {}".format(schema_filename, str(e)))
++
++    # start the instances
++    m1.start()
++    m2.start()
++
++    # Check that gecos is IA5 on M2
++    schema = SchemaLegacy(m2)
++    attributetypes = schema.query_attributetype('gecos')
++    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
++
++
++    # Add a gecos UTF value on M1
++    testuser.replace('gecos', 'Hélène')
++
++    # Check replication is still working
++    testuser.replace('displayName', 'ascii value')
++    repl.wait_for_replication(m1, m2)
++    testuser_m2 = UserAccount(m2, testuser_dn)
++    assert testuser_m2.exists()
++    assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
++
++    def fin():
++        m1.start()
++        m2.start()
++        testuser.delete()
++        repl.wait_for_replication(m1, m2)
++
++        # on M2 restore a default 99user.ldif
++        m2.stop()
++        os.remove(m2.schemadir + "/99user.ldif")
++        schema_filename = (m2.schemadir + "/99user.ldif")
++        try:
++            with open(schema_filename, 'w') as schema_file:
++                schema_file.write("dn: cn=schema\n")
++            os.chmod(schema_filename, 0o777)
++        except OSError as e:
++            log.fatal("Failed to update schema file: " +
++                      "{} Error: {}".format(schema_filename, str(e)))
++        m2.start()
++        m1.start()
++
++    request.addfinalizer(fin)
++
++def test_gecos_directoryString_wins_M1(topo_m2, request):
++    """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
++    Then directoryString wins when nsSchemaCSN M1 is the greatest
++
++    :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb
++    :setup: Two suppliers replication setup
++    :steps:
++        1. Create a testuser on M1
++        2  Stop M1 and M2
++        3  Change gecos def on M2 to be IA5
++        4  Start M1 and M2
++        5  Update M1 schema so that M1 has greatest nsSchemaCSN
++        6  Update testuser with gecos directoryString value
++        7  Check replication is still working
++        8  Check gecos is DirectoryString on M1 and M2
++    :expectedresults:
++        1. success
++        2. success
++        3. success
++        4. success
++        5. success
++        6. success
++        7. success
++        8. success
++
++    """
++
++    repl = ReplicationManager(DEFAULT_SUFFIX)
++    m1 = topo_m2.ms["supplier1"]
++    m2 = topo_m2.ms["supplier2"]
++    
++
++    # create a test user
++    testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
++    testuser = UserAccount(m1, testuser_dn)
++    try:
++        testuser.create(properties={
++            'uid': 'testuser',
++            'cn': 'testuser',
++            'sn': 'testuser',
++            'uidNumber' : '1000',
++            'gidNumber' : '2000',
++            'homeDirectory' : '/home/testuser',
++        })
++    except ldap.ALREADY_EXISTS:
++        pass
++    repl.wait_for_replication(m1, m2)
++
++    # Stop suppliers to update the schema
++    m1.stop()
++    m2.stop()
++
++    # on M1: gecos is DirectoryString (default)
++    # on M2: gecos is IA5
++    schema_filename = (m2.schemadir + "/99user.ldif")
++    try:
++        with open(schema_filename, 'w') as schema_file:
++            schema_file.write("dn: cn=schema\n")
++            schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
++                              "'gecos' DESC 'The GECOS field; the common name' " +
++                              "EQUALITY caseIgnoreIA5Match " +
++                              "SUBSTR caseIgnoreIA5SubstringsMatch " +
++                              "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
++                              "SINGLE-VALUE )\n")
++        os.chmod(schema_filename, 0o777)
++    except OSError as e:
++        log.fatal("Failed to update schema file: " +
++                  "{} Error: {}".format(schema_filename, str(e)))
++
++    # start the instances
++    m1.start()
++    m2.start()
++
++    # Check that gecos is IA5 on M2
++    schema = SchemaLegacy(m2)
++    attributetypes = schema.query_attributetype('gecos')
++    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
++
++
++    # update M1 schema to increase its nsschemaCSN
++    new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
++    m1.schema.add_schema('attributetypes', ensure_bytes(new_at))
++
++    # Add a gecos UTF value on M1
++    testuser.replace('gecos', 'Hélène')
++
++    # Check replication is still working
++    testuser.replace('displayName', 'ascii value')
++    repl.wait_for_replication(m1, m2)
++    testuser_m2 = UserAccount(m2, testuser_dn)
++    assert testuser_m2.exists()
++    assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
++
++    # Check that gecos is DirectoryString on M1
++    schema = SchemaLegacy(m1)
++    attributetypes = schema.query_attributetype('gecos')
++    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
++
++    # Check that gecos is DirectoryString on M2
++    schema = SchemaLegacy(m2)
++    attributetypes = schema.query_attributetype('gecos')
++    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
++
++    def fin():
++        m1.start()
++        m2.start()
++        testuser.delete()
++        m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
++        repl.wait_for_replication(m1, m2)
++
++        # on M2 restore a default 99user.ldif
++        m2.stop()
++        os.remove(m2.schemadir + "/99user.ldif")
++        schema_filename = (m2.schemadir + "/99user.ldif")
++        try:
++            with open(schema_filename, 'w') as schema_file:
++                schema_file.write("dn: cn=schema\n")
++            os.chmod(schema_filename, 0o777)
++        except OSError as e:
++            log.fatal("Failed to update schema file: " +
++                      "{} Error: {}".format(schema_filename, str(e)))
++        m2.start()
++        m1.start()
++
++    request.addfinalizer(fin)
++
++def test_gecos_directoryString_wins_M2(topo_m2, request):
++    """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
++    Then directoryString wins when nsSchemaCSN M2 is the greatest
++
++    :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348
++    :setup: Two suppliers replication setup
++    :steps:
++        1. Create a testuser on M1
++        2  Stop M1 and M2
++        3  Change gecos def on M2 to be IA5
++        4  Start M1 and M2
++        5  Update M2 schema so that M2 has greatest nsSchemaCSN
++        6  Update testuser on M2 and trigger replication to M1
++        7  Update testuser on M2 with gecos directoryString value
++        8  Check replication is still working
++        9  Check gecos is DirectoryString on M1 and M2
++    :expectedresults:
++        1. success
++        2. success
++        3. success
++        4. success
++        5. success
++        6. success
++        7. success
++        8. success
++        9. success
++
++    """
++
++    repl = ReplicationManager(DEFAULT_SUFFIX)
++    m1 = topo_m2.ms["supplier1"]
++    m2 = topo_m2.ms["supplier2"]
++    
++
++    # create a test user
++    testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
++    testuser = UserAccount(m1, testuser_dn)
++    try:
++        testuser.create(properties={
++            'uid': 'testuser',
++            'cn': 'testuser',
++            'sn': 'testuser',
++            'uidNumber' : '1000',
++            'gidNumber' : '2000',
++            'homeDirectory' : '/home/testuser',
++        })
++    except ldap.ALREADY_EXISTS:
++        pass
++    testuser.replace('displayName', 'to trigger replication M1-> M2')
++    repl.wait_for_replication(m1, m2)
++
++    # Stop suppliers to update the schema
++    m1.stop()
++    m2.stop()
++
++    # on M1: gecos is DirectoryString (default)
++    # on M2: gecos is IA5
++    schema_filename = (m2.schemadir + "/99user.ldif")
++    try:
++        with open(schema_filename, 'w') as schema_file:
++            schema_file.write("dn: cn=schema\n")
++            schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
++                              "'gecos' DESC 'The GECOS field; the common name' " +
++                              "EQUALITY caseIgnoreIA5Match " +
++                              "SUBSTR caseIgnoreIA5SubstringsMatch " +
++                              "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
++                              "SINGLE-VALUE )\n")
++        os.chmod(schema_filename, 0o777)
++    except OSError as e:
++        log.fatal("Failed to update schema file: " +
++                  "{} Error: {}".format(schema_filename, str(e)))
++
++    # start the instances
++    m1.start()
++    m2.start()
++
++    # Check that gecos is IA5 on M2
++    schema = SchemaLegacy(m2)
++    attributetypes = schema.query_attributetype('gecos')
++    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
++
++    # update M2 schema to increase its nsschemaCSN
++    new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
++    m2.schema.add_schema('attributetypes', ensure_bytes(new_at))
++
++    # update just to trigger replication M2->M1
++    # and update of M2 schema
++    testuser_m2 = UserAccount(m2, testuser_dn)
++    testuser_m2.replace('displayName', 'to trigger replication M2-> M1')
++
++    # Add a gecos UTF value on M1
++    testuser.replace('gecos', 'Hélène')
++
++    # Check replication is still working
++    testuser.replace('displayName', 'ascii value')
++    repl.wait_for_replication(m1, m2)
++    assert testuser_m2.exists()
++    assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
++
++    # Check that gecos is DirectoryString on M1
++    schema = SchemaLegacy(m1)
++    attributetypes = schema.query_attributetype('gecos')
++    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
++
++    # Check that gecos is DirectoryString on M2
++    schema = SchemaLegacy(m2)
++    attributetypes = schema.query_attributetype('gecos')
++    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
++
++    def fin():
++        m1.start()
++        m2.start()
++        testuser.delete()
++        m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
++        repl.wait_for_replication(m1, m2)
++
++        # on M2 restore a default 99user.ldif
++        m2.stop()
++        os.remove(m2.schemadir + "/99user.ldif")
++        schema_filename = (m2.schemadir + "/99user.ldif")
++        try:
++            with open(schema_filename, 'w') as schema_file:
++                schema_file.write("dn: cn=schema\n")
++            os.chmod(schema_filename, 0o777)
++        except OSError as e:
++            log.fatal("Failed to update schema file: " +
++                      "{} Error: {}".format(schema_filename, str(e)))
++        m2.start()
++
++    request.addfinalizer(fin)
+ 
+ if __name__ == '__main__':
+     # Run isolated
+diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
+index 8ba72e1e3..998b8983b 100644
+--- a/ldap/schema/10rfc2307compat.ldif
++++ b/ldap/schema/10rfc2307compat.ldif
+@@ -21,9 +21,9 @@ attributeTypes: (
+ attributeTypes: (
+   1.3.6.1.1.1.1.2 NAME 'gecos'
+   DESC 'The GECOS field; the common name'
+-  EQUALITY caseIgnoreIA5Match
+-  SUBSTR caseIgnoreIA5SubstringsMatch
+-  SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
++  EQUALITY caseIgnoreMatch
++  SUBSTR caseIgnoreSubstringsMatch
++  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+   SINGLE-VALUE
+   )
+ attributeTypes: (
+-- 
+2.31.1
+
diff --git a/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch b/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
deleted file mode 100644
index 489f4b3..0000000
--- a/SOURCES/0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001
-From: progier389 <progier@redhat.com>
-Date: Wed, 26 May 2021 16:07:43 +0200
-Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI
- (#4783)
-
-(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022)
----
- ldap/servers/slapd/connection.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
-diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
-index c7a15e775..e0c1a52d2 100644
---- a/ldap/servers/slapd/connection.c
-+++ b/ldap/servers/slapd/connection.c
-@@ -1771,6 +1771,14 @@ connection_threadmain()
-             }
-         }
- 
-+        /*
-+         * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done
-+         * before replication session is properly set).
-+         */
-+        if (replication_connection) {
-+            operation_set_flag(op, OP_FLAG_REPLICATED);
-+        }
-+
-         /*
-          * Call the do_<operation> function to process this request.
-          */
--- 
-2.26.3
-
diff --git a/SOURCES/0010-Issue-4997-Function-declaration-compiler-error-on-1..patch b/SOURCES/0010-Issue-4997-Function-declaration-compiler-error-on-1..patch
new file mode 100644
index 0000000..df7195a
--- /dev/null
+++ b/SOURCES/0010-Issue-4997-Function-declaration-compiler-error-on-1..patch
@@ -0,0 +1,32 @@
+From 3909877f12e50556e844bc20e72870a4fa905ada Mon Sep 17 00:00:00 2001
+From: James Chapman <jachapma@redhat.com>
+Date: Tue, 9 Nov 2021 12:55:28 +0000
+Subject: [PATCH 10/12] Issue 4997 - Function declaration compiler error on
+ 1.4.3
+
+Bug description: Building the server on the 1.4.3 branch generates a
+compiler error due to a typo in function declaration.
+
+Fixes: https://github.com/389ds/389-ds-base/issues/4997
+
+Reviewed by: @jchapman (one line commit rule)
+---
+ ldap/servers/slapd/slapi-private.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
+index 570765e47..d6d74e8a7 100644
+--- a/ldap/servers/slapd/slapi-private.h
++++ b/ldap/servers/slapd/slapi-private.h
+@@ -273,7 +273,7 @@ void *csngen_register_callbacks(CSNGen *gen, GenCSNFn genFn, void *genArg, Abort
+ void csngen_unregister_callbacks(CSNGen *gen, void *cookie);
+ 
+ /* debugging function */
+-void csngen_dump_state(const CSNGen *gen);
++void csngen_dump_state(const CSNGen *gen, int severity);
+ 
+ /* this function tests csn generator */
+ void csngen_test(void);
+-- 
+2.31.1
+
diff --git a/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch b/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch
deleted file mode 100644
index 2121550..0000000
--- a/SOURCES/0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch
+++ /dev/null
@@ -1,1453 +0,0 @@
-From c79630de8012a893ed3d1c46b41bc7871a07a3e2 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Wed, 26 May 2021 13:32:13 -0400
-Subject: [PATCH 11/12] Issue 4778 - RFE - Allow setting TOD for db compaction
- and add task
-
-Description:  Since database compaction can be costly it should be allowed
-              to set a time to execute it during offpeak hours.  Once the
-              compaction interval has been met, it will wait for the configured
-              time of day to do the compaction.  The default is just before
-              midnight: 23:59
-
-              A task was also created that can run compaction on demand,
-              and can also just target the replication changelog.  This could
-              be used in conjunction with a cronjob for more complex
-              execution patterns.
-
-ASAN tested and approved.
-
-relates: https://github.com/389ds/389-ds-base/issues/4778
-
-Reviewed by: spichugi(Thanks!)
----
- .../tests/suites/config/compact_test.py       |  81 ++++++
- ldap/schema/01core389.ldif                    |   3 +-
- ldap/servers/plugins/replication/cl5.h        |   1 +
- ldap/servers/plugins/replication/cl5_api.c    |  70 ++++-
- ldap/servers/plugins/replication/cl5_api.h    |   2 +-
- .../servers/plugins/replication/cl5_clcache.c |   3 -
- ldap/servers/plugins/replication/cl5_config.c | 102 ++++++-
- ldap/servers/plugins/replication/cl5_init.c   |   2 +-
- .../servers/plugins/replication/repl_shared.h |   2 +
- ldap/servers/plugins/retrocl/retrocl.c        |   1 -
- .../slapd/back-ldbm/db-bdb/bdb_config.c       |  79 ++++++
- .../slapd/back-ldbm/db-bdb/bdb_layer.c        | 258 ++++++++++++------
- .../slapd/back-ldbm/db-bdb/bdb_layer.h        |   4 +-
- ldap/servers/slapd/back-ldbm/init.c           |   2 +
- ldap/servers/slapd/back-ldbm/ldbm_config.h    |   1 +
- .../servers/slapd/back-ldbm/proto-back-ldbm.h |   1 +
- ldap/servers/slapd/filtercmp.c                |   5 +-
- ldap/servers/slapd/pblock.c                   |  17 +-
- ldap/servers/slapd/slap.h                     |   2 +
- ldap/servers/slapd/slapi-private.h            |   1 +
- ldap/servers/slapd/task.c                     | 102 ++++++-
- src/cockpit/389-console/src/database.jsx      |   1 +
- .../src/lib/database/databaseConfig.jsx       |  16 +-
- src/lib389/lib389/_constants.py               |   1 +
- src/lib389/lib389/backend.py                  |   1 +
- src/lib389/lib389/cli_conf/backend.py         |  24 +-
- src/lib389/lib389/cli_conf/replication.py     |   3 +
- src/lib389/lib389/tasks.py                    |  14 +-
- 28 files changed, 689 insertions(+), 110 deletions(-)
- create mode 100644 dirsrvtests/tests/suites/config/compact_test.py
-
-diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
-new file mode 100644
-index 000000000..1f1c097e4
---- /dev/null
-+++ b/dirsrvtests/tests/suites/config/compact_test.py
-@@ -0,0 +1,81 @@
-+import logging
-+import pytest
-+import os
-+import time
-+from lib389.tasks import DBCompactTask
-+from lib389.backend import DatabaseConfig
-+from lib389.replica import Changelog5
-+from lib389.topologies import topology_m1 as topo
-+
-+log = logging.getLogger(__name__)
-+
-+
-+def test_compact_db_task(topo):
-+    """Specify a test case purpose or name here
-+
-+    :id: 1b3222ef-a336-4259-be21-6a52f76e1859
-+    :setup: Standalone Instance
-+    :steps:
-+        1. Create task
-+        2. Check task was successful
-+        3. Check errors log to show task was run
-+        3. Create task just for replication
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+    """
-+    inst = topo.ms["supplier1"]
-+
-+    task = DBCompactTask(inst)
-+    task.create()
-+    task.wait()
-+    assert task.get_exit_code() == 0
-+
-+    # Check errors log to make sure task actually compacted db
-+    assert inst.searchErrorsLog("Compacting databases")
-+    inst.deleteErrorLogs(restart=False)
-+
-+
-+def test_compaction_interval_and_time(topo):
-+    """Specify a test case purpose or name here
-+
-+    :id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
-+    :setup: Supplier Instance
-+    :steps:
-+        1. Configure compact interval and time for database and changelog
-+        2. Check compaction occurs as expected
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+    """
-+
-+    inst = topo.ms["supplier1"]
-+
-+    # Configure DB compaction
-+    config = DatabaseConfig(inst)
-+    config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', '00:01')])
-+
-+    # Configure changelog compaction
-+    cl5 = Changelog5(inst)
-+    cl5.replace_many(
-+        ('nsslapd-changelogcompactdb-interval', '2'),
-+        ('nsslapd-changelogcompactdb-time', '00:01'),
-+        ('nsslapd-changelogtrim-interval',  '2')
-+    )
-+    inst.deleteErrorLogs()
-+
-+    # Check is compaction occurred
-+    time.sleep(6)
-+    assert inst.searchErrorsLog("Compacting databases")
-+    assert inst.searchErrorsLog("compacting replication changelogs")
-+    inst.deleteErrorLogs(restart=False)
-+
-+
-+if __name__ == '__main__':
-+    # Run isolated
-+    # -s for DEBUG mode
-+    CURRENT_FILE = os.path.realpath(__file__)
-+    pytest.main(["-s", CURRENT_FILE])
-+
-diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
-index 9e9a26c21..0c73e5114 100644
---- a/ldap/schema/01core389.ldif
-+++ b/ldap/schema/01core389.ldif
-@@ -285,6 +285,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2310 NAME 'nsds5ReplicaFlowControlWindow
- attributeTypes: ( 2.16.840.1.113730.3.1.2311 NAME 'nsds5ReplicaFlowControlPause' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
- attributeTypes: ( 2.16.840.1.113730.3.1.2313 NAME 'nsslapd-changelogtrim-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
- attributeTypes: ( 2.16.840.1.113730.3.1.2314 NAME 'nsslapd-changelogcompactdb-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
-+attributeTypes: ( 2.16.840.1.113730.3.1.2385 NAME 'nsslapd-changelogcompactdb-time' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
- attributeTypes: ( 2.16.840.1.113730.3.1.2315 NAME 'nsDS5ReplicaWaitForAsyncResults' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
- attributeTypes: ( 2.16.840.1.113730.3.1.2316 NAME 'nsslapd-auditfaillog-maxlogsize' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
- attributeTypes: ( 2.16.840.1.113730.3.1.2317 NAME 'nsslapd-auditfaillog-logrotationsync-enabled' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
-@@ -345,5 +346,5 @@ objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape
- objectClasses: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' )
- objectClasses: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' )
- objectClasses: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top  MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ schemaUpdateAttributeReject) X-ORIGIN 'Netscape Directory Server' )
--objectClasses: ( 2.16.840.1.113730.3.2.332 NAME 'nsChangelogConfig' DESC 'Configuration of the changelog5 object' SUP top MUST ( cn $ nsslapd-changelogdir ) MAY ( nsslapd-changelogmaxage $ nsslapd-changelogtrim-interval $ nsslapd-changelogmaxentries $ nsslapd-changelogsuffix $ nsslapd-changelogcompactdb-interval $ nsslapd-encryptionalgorithm $ nsSymmetricKey ) X-ORIGIN '389 Directory Server' )
-+objectClasses: ( 2.16.840.1.113730.3.2.332 NAME 'nsChangelogConfig' DESC 'Configuration of the changelog5 object' SUP top MUST ( cn $ nsslapd-changelogdir ) MAY ( nsslapd-changelogmaxage $ nsslapd-changelogtrim-interval $ nsslapd-changelogmaxentries $ nsslapd-changelogsuffix $ nsslapd-changelogcompactdb-interval $ nsslapd-changelogcompactdb-time $ nsslapd-encryptionalgorithm $ nsSymmetricKey ) X-ORIGIN '389 Directory Server' )
- objectClasses: ( 2.16.840.1.113730.3.2.337 NAME 'rewriterEntry' DESC '' SUP top MUST ( nsslapd-libPath ) MAY ( cn $ nsslapd-filterrewriter $ nsslapd-returnedAttrRewriter ) X-ORIGIN '389 Directory Server' )
-diff --git a/ldap/servers/plugins/replication/cl5.h b/ldap/servers/plugins/replication/cl5.h
-index 2af57e369..99ea1c6a2 100644
---- a/ldap/servers/plugins/replication/cl5.h
-+++ b/ldap/servers/plugins/replication/cl5.h
-@@ -29,6 +29,7 @@ typedef struct changelog5Config
-     char *symmetricKey;
-     long compactInterval;
-     long trimInterval;
-+    char *compactTime;
- } changelog5Config;
- 
- /* initializes changelog*/
-diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
-index 403a6a666..75a2f46f5 100644
---- a/ldap/servers/plugins/replication/cl5_api.c
-+++ b/ldap/servers/plugins/replication/cl5_api.c
-@@ -158,6 +158,7 @@ typedef struct cl5trim
-     time_t maxAge;       /* maximum entry age in seconds                            */
-     int maxEntries;      /* maximum number of entries across all changelog files    */
-     int compactInterval; /* interval to compact changelog db */
-+    char *compactTime;   /* time to compact changelog db */
-     int trimInterval;    /* trimming interval */
-     PRLock *lock;        /* controls access to trimming configuration            */
- } CL5Trim;
-@@ -184,6 +185,7 @@ typedef struct cl5desc
-     PRLock *clLock;         /* Lock associated to clVar, used to notify threads on close */
-     PRCondVar *clCvar;      /* Condition Variable used to notify threads on close */
-     void *clcrypt_handle;   /* for cl encryption */
-+    char *compact_time;     /* Time to execute changelog compaction */
- } CL5Desc;
- 
- typedef void (*VFP)(void *);
-@@ -1025,7 +1027,7 @@ cl5GetState()
-                 CL5_BAD_STATE if changelog is not open
-  */
- int
--cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int trimInterval)
-+cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval)
- {
-     if (s_cl5Desc.dbState == CL5_STATE_NONE) {
-         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
-@@ -1061,6 +1063,10 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int t
-         s_cl5Desc.dbTrim.compactInterval = compactInterval;
-     }
- 
-+    if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
-+        s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
-+    }
-+
-     if (trimInterval != CL5_NUM_IGNORE) {
-         s_cl5Desc.dbTrim.trimInterval = trimInterval;
-     }
-@@ -3077,16 +3083,48 @@ _cl5TrimCleanup(void)
- {
-     if (s_cl5Desc.dbTrim.lock)
-         PR_DestroyLock(s_cl5Desc.dbTrim.lock);
-+    slapi_ch_free_string(&s_cl5Desc.dbTrim.compactTime);
- 
-     memset(&s_cl5Desc.dbTrim, 0, sizeof(s_cl5Desc.dbTrim));
- }
- 
-+static time_t
-+_cl5_get_tod_expiration(char *expire_time)
-+{
-+    time_t start_time, todays_elapsed_time, now = time(NULL);
-+    struct tm *tm_struct = localtime(&now);
-+    char hour_str[3] = {0};
-+    char min_str[3] = {0};
-+    char *s = expire_time;
-+    char *endp = NULL;
-+    int32_t hour, min, expiring_time;
-+
-+    /* Get today's start time */
-+    todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec);
-+    start_time = slapi_current_utc_time() - todays_elapsed_time;
-+
-+    /* Get the hour and minute and calculate the expiring time.  The time was
-+     * already validated in bdb_config.c:  HH:MM */
-+    hour_str[0] = *s++;
-+    hour_str[1] = *s++;
-+    s++;  /* skip colon */
-+    min_str[0] = *s++;
-+    min_str[1] = *s++;
-+    hour = strtoll(hour_str, &endp, 10);
-+    min = strtoll(min_str, &endp, 10);
-+    expiring_time = (hour * 60 * 60) + (min * 60);
-+
-+    return start_time + expiring_time;
-+}
-+
- static int
- _cl5TrimMain(void *param __attribute__((unused)))
- {
-     time_t timePrev = slapi_current_utc_time();
-     time_t timeCompactPrev = slapi_current_utc_time();
-     time_t timeNow;
-+    PRBool compacting = PR_FALSE;
-+    int32_t compactdb_time = 0;
- 
-     PR_AtomicIncrement(&s_cl5Desc.threadCount);
- 
-@@ -3097,11 +3135,26 @@ _cl5TrimMain(void *param __attribute__((unused)))
-             timePrev = timeNow;
-             _cl5DoTrimming();
-         }
-+
-+        if (!compacting) {
-+            /* Once we know we want to compact we need to stop refreshing the
-+             * TOD expiration. Otherwise if the compact time is close to
-+             * midnight we could roll over past midnight during the checkpoint
-+             * sleep interval, and we'd never actually compact the databases.
-+             * We also need to get this value before the sleep.
-+            */
-+            compactdb_time = _cl5_get_tod_expiration(s_cl5Desc.dbTrim.compactTime);
-+        }
-         if ((s_cl5Desc.dbTrim.compactInterval > 0) &&
--            (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval)) {
--            /* time to trim */
--            timeCompactPrev = timeNow;
--            _cl5CompactDBs();
-+            (timeNow - timeCompactPrev >= s_cl5Desc.dbTrim.compactInterval))
-+        {
-+            compacting = PR_TRUE;
-+            if (slapi_current_utc_time() > compactdb_time) {
-+				/* time to trim */
-+				timeCompactPrev = timeNow;
-+				_cl5CompactDBs();
-+				compacting = PR_FALSE;
-+            }
-         }
-         if (NULL == s_cl5Desc.clLock) {
-             /* most likely, emergency */
-@@ -3215,6 +3268,10 @@ _cl5CompactDBs(void)
-                       rc, db_strerror(rc));
-         goto bail;
-     }
-+
-+
-+    slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
-+                  "_cl5CompactDBs - compacting replication changelogs...\n");
-     for (fileObj = objset_first_obj(s_cl5Desc.dbFiles);
-          fileObj;
-          fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) {
-@@ -3235,6 +3292,9 @@ _cl5CompactDBs(void)
-                       "_cl5CompactDBs - %s - %d pages freed\n",
-                       dbFile->replName, c_data.compact_pages_free);
-     }
-+
-+    slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
-+                  "_cl5CompactDBs - compacting replication changelogs finished.\n");
- bail:
-     if (fileObj) {
-         object_release(fileObj);
-diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
-index 302af97a0..4b0949fb3 100644
---- a/ldap/servers/plugins/replication/cl5_api.h
-+++ b/ldap/servers/plugins/replication/cl5_api.h
-@@ -236,7 +236,7 @@ int cl5GetState(void);
-    Return:        CL5_SUCCESS if successful;
-                 CL5_BAD_STATE if changelog has not been open
-  */
--int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, int trimInterval);
-+int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval);
- 
- void cl5DestroyIterator(void *iterator);
- 
-diff --git a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c
-index 90dec4d54..e5a39c9c1 100644
---- a/ldap/servers/plugins/replication/cl5_clcache.c
-+++ b/ldap/servers/plugins/replication/cl5_clcache.c
-@@ -452,9 +452,6 @@ static int
- clcache_cursor_set(DBC *cursor, CLC_Buffer *buf)
- {
-     int rc;
--    uint32_t ulen;
--    uint32_t dlen;
--    uint32_t size;
- 
-     rc = cursor->c_get(cursor, &buf->buf_key, &buf->buf_data, DB_SET);
-     if (rc == DB_BUFFER_SMALL) {
-diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c
-index e0530bed2..b32686788 100644
---- a/ldap/servers/plugins/replication/cl5_config.c
-+++ b/ldap/servers/plugins/replication/cl5_config.c
-@@ -131,6 +131,7 @@ changelog5_config_done(changelog5Config *config)
-         /* slapi_ch_free_string accepts NULL pointer */
-         slapi_ch_free_string(&config->maxAge);
-         slapi_ch_free_string(&config->dir);
-+        slapi_ch_free_string(&config->compactTime);
-         slapi_ch_free_string(&config->symmetricKey);
-         slapi_ch_free_string(&config->dbconfig.encryptionAlgorithm);
-         slapi_ch_free_string(&config->dbconfig.symmetricKey);
-@@ -211,7 +212,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
-     }
- 
-     /* set trimming parameters */
--    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval);
-+    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
-     if (rc != CL5_SUCCESS) {
-         *returncode = 1;
-         if (returntext) {
-@@ -302,6 +303,7 @@ changelog5_config_modify(Slapi_PBlock *pb,
-     config.compactInterval = CL5_NUM_IGNORE;
-     slapi_ch_free_string(&config.maxAge);
-     config.maxAge = slapi_ch_strdup(CL5_STR_IGNORE);
-+    config.compactTime = slapi_ch_strdup(CHANGELOGDB_COMPACT_TIME);
-     config.trimInterval = CL5_NUM_IGNORE;
- 
-     slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
-@@ -375,6 +377,55 @@ changelog5_config_modify(Slapi_PBlock *pb,
-                         *returncode = LDAP_UNWILLING_TO_PERFORM;
-                         goto done;
-                     }
-+                } else if (strcasecmp(config_attr, CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE) == 0) {
-+                	if (config_attr_value && config_attr_value[0] != '\0') {
-+                	    char *val = slapi_ch_strdup(config_attr_value);
-+                        char *endp = NULL;
-+                        char *hour_str = NULL;
-+                        char *min_str = NULL;
-+                        int32_t hour, min;
-+                        errno = 0;
-+
-+                        slapi_ch_free_string(&config.compactTime);
-+
-+                      	if (strstr(val, ":")) {
-+                            /* Get the hour and minute */
-+                            hour_str = ldap_utf8strtok_r(val, ":", &min_str);
-+                  	        /* Validate hour */
-+                   	        hour = strtoll(hour_str, &endp, 10);
-+                 	        if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) {
-+          	       	            slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
-+                   	                    "Invalid hour set (%s), must be a two digit number between 00 and 23",
-+                   	                    hour_str);
-+                   	            slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config",
-+                                       "Invalid minute set (%s), must be a two digit number between 00 and 59.  "
-+                	       	                    "Using default of 23:59\n", hour_str);
-+                                *returncode = LDAP_UNWILLING_TO_PERFORM;
-+                   	            goto done;
-+           	       	        }
-+       	        	        /* Validate minute */
-+           	       	        min = strtoll(min_str, &endp, 10);
-+           	      	        if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) {
-+                   	            slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
-+                  	                    "Invalid minute set (%s), must be a two digit number between 00 and 59",
-+                   	                    hour_str);
-+                   	            slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config",
-+                   	                    "Invalid minute set (%s), must be a two digit number between 00 and 59.  "
-+                   	                    "Using default of 23:59\n", min_str);
-+                                *returncode = LDAP_UNWILLING_TO_PERFORM;
-+                   	            goto done;
-+                   	        }
-+                   	    } else {
-+                   	        /* Wrong format */
-+                   	        slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
-+                   	                "Invalid setting (%s), must have a time format of HH:MM", val);
-+                   	        slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config",
-+                   	                "Invalid setting (%s), must have a time format of HH:MM\n", val);
-+                            *returncode = LDAP_UNWILLING_TO_PERFORM;
-+                   	        goto done;
-+                   	    }
-+                        config.compactTime = slapi_ch_strdup(config_attr_value);
-+                    }
-                 } else if (strcasecmp(config_attr, CONFIG_CHANGELOG_TRIM_ATTRIBUTE) == 0) {
-                     if (slapi_is_duration_valid(config_attr_value)) {
-                         config.trimInterval = (long)slapi_parse_duration(config_attr_value);
-@@ -419,6 +470,11 @@ changelog5_config_modify(Slapi_PBlock *pb,
-         if (originalConfig->maxAge)
-             config.maxAge = slapi_ch_strdup(originalConfig->maxAge);
-     }
-+    if (strcmp(config.compactTime, CL5_STR_IGNORE) == 0) {
-+        slapi_ch_free_string(&config.compactTime);
-+        if (originalConfig->compactTime)
-+            config.compactTime = slapi_ch_strdup(originalConfig->compactTime);
-+    }
- 
-     /* attempt to change chagelog dir */
-     if (config.dir) {
-@@ -519,7 +575,7 @@ changelog5_config_modify(Slapi_PBlock *pb,
-     if (config.maxEntries != CL5_NUM_IGNORE ||
-         config.trimInterval != CL5_NUM_IGNORE ||
-         strcmp(config.maxAge, CL5_STR_IGNORE) != 0) {
--        rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval);
-+        rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
-         if (rc != CL5_SUCCESS) {
-             *returncode = 1;
-             if (returntext) {
-@@ -689,6 +745,7 @@ changelog5_extract_config(Slapi_Entry *entry, changelog5Config *config)
- {
-     const char *arg;
-     char *max_age = NULL;
-+    char *val = NULL;
- 
-     memset(config, 0, sizeof(*config));
-     config->dir = slapi_entry_attr_get_charptr(entry, CONFIG_CHANGELOG_DIR_ATTRIBUTE);
-@@ -711,6 +768,47 @@ changelog5_extract_config(Slapi_Entry *entry, changelog5Config *config)
-         config->compactInterval = CHANGELOGDB_COMPACT_INTERVAL;
-     }
- 
-+    arg = slapi_entry_attr_get_ref(entry, CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE);
-+    if (arg) {
-+        char *endp = NULL;
-+        char *hour_str = NULL;
-+        char *min_str = NULL;
-+        int32_t hour, min;
-+        errno = 0;
-+
-+        val = slapi_ch_strdup((char *)arg);
-+    	if (strstr(val, ":")) {
-+            /* Get the hour and minute */
-+            hour_str = ldap_utf8strtok_r(val, ":", &min_str);
-+    	        /* Validate hour */
-+   	        hour = strtoll(hour_str, &endp, 10);
-+   	        if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) {
-+   	            slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config",
-+   	                    "Invalid minute set (%s), must be a two digit number between 00 and 59.  "
-+   	                    "Using default of 23:59\n", hour_str);
-+   	            goto set_default;
-+   	        }
-+    	        /* Validate minute */
-+   	        min = strtoll(min_str, &endp, 10);
-+  	        if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) {
-+   	            slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config",
-+   	                    "Invalid minute set (%s), must be a two digit number between 00 and 59.  "
-+   	                    "Using default of 23:59\n", min_str);
-+   	            goto set_default;
-+   	        }
-+   	    } else {
-+   	        /* Wrong format */
-+   	        slapi_log_err(SLAPI_LOG_ERR, "changelog5_extract_config",
-+   	                "Invalid setting (%s), must have a time format of HH:MM\n", val);
-+   	        goto set_default;
-+   	    }
-+        config->compactTime = slapi_ch_strdup(arg);
-+    } else {
-+    	set_default:
-+        config->compactTime = slapi_ch_strdup(CHANGELOGDB_COMPACT_TIME);
-+    }
-+    slapi_ch_free_string(&val);
-+
-     arg = slapi_entry_attr_get_ref(entry, CONFIG_CHANGELOG_TRIM_ATTRIBUTE);
-     if (arg) {
-         if (slapi_is_duration_valid(arg)) {
-diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c
-index 112c4ece4..251859714 100644
---- a/ldap/servers/plugins/replication/cl5_init.c
-+++ b/ldap/servers/plugins/replication/cl5_init.c
-@@ -57,7 +57,7 @@ changelog5_init()
-     }
- 
-     /* set trimming parameters */
--    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.trimInterval);
-+    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
-     if (rc != CL5_SUCCESS) {
-         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
-                       "changelog5_init: failed to configure changelog trimming\n");
-diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h
-index b1ed86934..6708e12f7 100644
---- a/ldap/servers/plugins/replication/repl_shared.h
-+++ b/ldap/servers/plugins/replication/repl_shared.h
-@@ -26,11 +26,13 @@
- 
- #define CHANGELOGDB_TRIM_INTERVAL 300        /* 5 minutes */
- #define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */
-+#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */
- 
- #define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir"
- #define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries"
- #define CONFIG_CHANGELOG_MAXAGE_ATTRIBUTE "nsslapd-changelogmaxage"
- #define CONFIG_CHANGELOG_COMPACTDB_ATTRIBUTE "nsslapd-changelogcompactdb-interval"
-+#define CONFIG_CHANGELOG_COMPACTTIME_ATTRIBUTE "nsslapd-changelogcompactdb-time"
- #define CONFIG_CHANGELOG_TRIM_ATTRIBUTE "nsslapd-changelogtrim-interval"
- /* Changelog Internal Configuration Parameters -> Changelog Cache related */
- #define CONFIG_CHANGELOG_ENCRYPTION_ALGORITHM "nsslapd-encryptionalgorithm"
-diff --git a/ldap/servers/plugins/retrocl/retrocl.c b/ldap/servers/plugins/retrocl/retrocl.c
-index 2a620301c..f73c81528 100644
---- a/ldap/servers/plugins/retrocl/retrocl.c
-+++ b/ldap/servers/plugins/retrocl/retrocl.c
-@@ -400,7 +400,6 @@ retrocl_start(Slapi_PBlock *pb)
- 
-         for (size_t i = 0; i < num_vals; i++) {
-             char *value = values[i];
--            size_t length = strlen(value);
- 
-             char *pos = strchr(value, ':');
-             if (pos == NULL) {
-diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
-index 167644943..4261c6ce2 100644
---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
-+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
-@@ -678,6 +678,84 @@ bdb_config_db_compactdb_interval_set(void *arg,
-     return retval;
- }
- 
-+static void *
-+bdb_config_db_compactdb_time_get(void *arg)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+    return (void *)slapi_ch_strdup(BDB_CONFIG(li)->bdb_compactdb_time);
-+}
-+
-+static int
-+bdb_config_db_compactdb_time_set(void *arg,
-+                                 void *value,
-+                                 char *errorbuf __attribute__((unused)),
-+                                 int phase __attribute__((unused)),
-+                                 int apply)
-+{
-+    struct ldbminfo *li = (struct ldbminfo *)arg;
-+    char *val = slapi_ch_strdup((char *)value);
-+    char *endp = NULL;
-+    char *hour_str = NULL;
-+    char *min_str = NULL;
-+    char *default_time = "23:59";
-+    int32_t hour, min;
-+    int retval = LDAP_SUCCESS;
-+    errno = 0;
-+
-+    if (strstr(val, ":")) {
-+        /* Get the hour and minute */
-+        hour_str = ldap_utf8strtok_r(val, ":", &min_str);
-+
-+        /* Validate hour */
-+        hour = strtoll(hour_str, &endp, 10);
-+        if (*endp != '\0' || errno == ERANGE || hour < 0 || hour > 23 || strlen(hour_str) != 2) {
-+            slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
-+                    "Invalid hour set (%s), must be a two digit number between 00 and 23",
-+                    hour_str);
-+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set",
-+                    "Invalid minute set (%s), must be a two digit number between 00 and 59.  "
-+                    "Using default of 23:59\n", hour_str);
-+            retval = LDAP_OPERATIONS_ERROR;
-+            goto done;
-+        }
-+
-+        /* Validate minute */
-+        min = strtoll(min_str, &endp, 10);
-+        if (*endp != '\0' || errno == ERANGE || min < 0 || min > 59 || strlen(min_str) != 2) {
-+            slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
-+                    "Invalid minute set (%s), must be a two digit number between 00 and 59",
-+                    hour_str);
-+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set",
-+                    "Invalid minute set (%s), must be a two digit number between 00 and 59.  "
-+                    "Using default of 23:59\n", min_str);
-+            retval = LDAP_OPERATIONS_ERROR;
-+            goto done;
-+        }
-+    } else {
-+        /* Wrong format */
-+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
-+                "Invalid setting (%s), must have a time format of HH:MM", val);
-+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_compactdb_interval_set",
-+                "Invalid setting (%s), must have a time format of HH:MM\n", val);
-+        retval = LDAP_OPERATIONS_ERROR;
-+        goto done;
-+    }
-+
-+done:
-+    if (apply) {
-+        slapi_ch_free((void **)&(BDB_CONFIG(li)->bdb_compactdb_time));
-+        if (retval) {
-+            /* Something went wrong, use the default */
-+            BDB_CONFIG(li)->bdb_compactdb_time = slapi_ch_strdup(default_time);
-+        } else {
-+            BDB_CONFIG(li)->bdb_compactdb_time = slapi_ch_strdup((char *)value);
-+        }
-+    }
-+    slapi_ch_free_string(&val);
-+
-+    return retval;
-+}
-+
- static void *
- bdb_config_db_page_size_get(void *arg)
- {
-@@ -1473,6 +1551,7 @@ static config_info bdb_config_param[] = {
-     {CONFIG_DB_TRANSACTION_WAIT, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_transaction_wait_get, &bdb_config_db_transaction_wait_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-     {CONFIG_DB_CHECKPOINT_INTERVAL, CONFIG_TYPE_INT, "60", &bdb_config_db_checkpoint_interval_get, &bdb_config_db_checkpoint_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-     {CONFIG_DB_COMPACTDB_INTERVAL, CONFIG_TYPE_INT, "2592000" /*30days*/, &bdb_config_db_compactdb_interval_get, &bdb_config_db_compactdb_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-+    {CONFIG_DB_COMPACTDB_TIME, CONFIG_TYPE_STRING, "23:59", &bdb_config_db_compactdb_time_get, &bdb_config_db_compactdb_time_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-     {CONFIG_DB_TRANSACTION_BATCH, CONFIG_TYPE_INT, "0", &bdb_get_batch_transactions, &bdb_set_batch_transactions, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-     {CONFIG_DB_TRANSACTION_BATCH_MIN_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_min_sleep, &bdb_set_batch_txn_min_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-     {CONFIG_DB_TRANSACTION_BATCH_MAX_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_max_sleep, &bdb_set_batch_txn_max_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
-index 2f25f67a2..ec1976d38 100644
---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
-+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
-@@ -2126,6 +2126,7 @@ bdb_post_close(struct ldbminfo *li, int dbmode)
-          */
-         slapi_ch_free_string(&conf->bdb_dbhome_directory);
-         slapi_ch_free_string(&conf->bdb_home_directory);
-+        slapi_ch_free_string(&conf->bdb_compactdb_time);
-     }
- 
-     return return_value;
-@@ -3644,6 +3645,39 @@ log_flush_threadmain(void *param)
-     return 0;
- }
- 
-+/*
-+ * This refreshes the TOD expiration.  So live changes to the configuration
-+ * will take effect immediately.
-+ */
-+static time_t
-+bdb_get_tod_expiration(char *expire_time)
-+{
-+    time_t start_time, todays_elapsed_time, now = time(NULL);
-+    struct tm *tm_struct = localtime(&now);
-+    char hour_str[3] = {0};
-+    char min_str[3] = {0};
-+    char *s = expire_time;
-+    char *endp = NULL;
-+    int32_t hour, min, expiring_time;
-+
-+    /* Get today's start time */
-+    todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec);
-+    start_time = slapi_current_utc_time() - todays_elapsed_time;
-+
-+    /* Get the hour and minute and calculate the expiring time.  The time was
-+     * already validated in bdb_config.c:  HH:MM */
-+    hour_str[0] = *s++;
-+    hour_str[1] = *s++;
-+    s++;  /* skip colon */
-+    min_str[0] = *s++;
-+    min_str[1] = *s++;
-+    hour = strtoll(hour_str, &endp, 10);
-+    min = strtoll(min_str, &endp, 10);
-+    expiring_time = (hour * 60 * 60) + (min * 60);
-+
-+    return start_time + expiring_time;
-+}
-+
- /*
-  * create a thread for checkpoint_threadmain
-  */
-@@ -3685,7 +3719,9 @@ checkpoint_threadmain(void *param)
-     time_t checkpoint_interval_update = 0;
-     time_t compactdb_interval = 0;
-     time_t checkpoint_interval = 0;
--    back_txn txn;
-+    int32_t compactdb_time = 0;
-+    PRBool compacting = PR_FALSE;
-+
- 
-     PR_ASSERT(NULL != param);
-     li = (struct ldbminfo *)param;
-@@ -3724,22 +3760,35 @@ checkpoint_threadmain(void *param)
-     slapi_timespec_expire_at(checkpoint_interval, &checkpoint_expire);
- 
-     while (!BDB_CONFIG(li)->bdb_stop_threads) {
--        /* sleep for a while */
--        /* why aren't we sleeping exactly the right amount of time ? */
--        /* answer---because the interval might be changed after the server
--         * starts up */
-+        PR_Lock(li->li_config_mutex);
-+        checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval;
-+        compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval;
-+        if (!compacting) {
-+            /* Once we know we want to compact we need to stop refreshing the
-+             * TOD expiration. Otherwise if the compact time is close to
-+             * midnight we could roll over past midnight during the checkpoint
-+             * sleep interval, and we'd never actually compact the databases.
-+             * We also need to get this value before the sleep.
-+             */
-+            compactdb_time = bdb_get_tod_expiration((char *)BDB_CONFIG(li)->bdb_compactdb_time);
-+        }
-+        PR_Unlock(li->li_config_mutex);
-+
-+        if (compactdb_interval_update != compactdb_interval) {
-+            /* Compact interval was changed, so reset the timer */
-+            slapi_timespec_expire_at(compactdb_interval_update, &compactdb_expire);
-+        }
- 
-+        /* Sleep for a while ...
-+         * Why aren't we sleeping exactly the right amount of time ?
-+         * Answer---because the interval might be changed after the server
-+         * starts up */
-         DS_Sleep(interval);
- 
-         if (0 == BDB_CONFIG(li)->bdb_enable_transactions) {
-             continue;
-         }
- 
--        PR_Lock(li->li_config_mutex);
--        checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval;
--        compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval;
--        PR_Unlock(li->li_config_mutex);
--
-         /* If the checkpoint has been updated OR we have expired */
-         if (checkpoint_interval != checkpoint_interval_update ||
-             slapi_timespec_expire_check(&checkpoint_expire) == TIMER_EXPIRED) {
-@@ -3807,94 +3856,37 @@ checkpoint_threadmain(void *param)
- 
-         /*
-          * Remember that if compactdb_interval is 0, timer_expired can
--         * never occur unless the value in compctdb_interval changes.
-+         * never occur unless the value in compactdb_interval changes.
-          *
--         * this could have been a bug infact, where compactdb_interval
-+         * this could have been a bug in fact, where compactdb_interval
-          * was 0, if you change while running it would never take effect ....
-          */
--        if (compactdb_interval_update != compactdb_interval ||
--            slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) {
--            int rc = 0;
--            Object *inst_obj;
--            ldbm_instance *inst;
--            DB *db = NULL;
--            DB_COMPACT c_data = {0};
--
--            for (inst_obj = objset_first_obj(li->li_instance_set);
--                 inst_obj;
--                 inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
--                inst = (ldbm_instance *)object_get_data(inst_obj);
--                rc = dblayer_get_id2entry(inst->inst_be, &db);
--                if (!db || rc) {
--                    continue;
--                }
--                slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain", "Compacting DB start: %s\n",
--                              inst->inst_name);
--
--                /*
--                 * It's possible for this to heap us after free because when we access db
--                 * *just* as the server shut's down, we don't know it. So we should probably
--                 * do something like wrapping access to the db var in a rwlock, and have "read"
--                 * to access, and take writes to change the state. This would prevent the issue.
--                 */
--                DBTYPE type;
--                rc = db->get_type(db, &type);
--                if (rc) {
--                    slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain",
--                                  "compactdb: failed to determine db type for %s: db error - %d %s\n",
--                                  inst->inst_name, rc, db_strerror(rc));
--                    continue;
--                }
-+        if (slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) {
-+            compacting = PR_TRUE;
-+            if (slapi_current_utc_time() < compactdb_time) {
-+                /* We have passed the interval, but we need to wait for a
-+                 * particular TOD to pass before compacting */
-+                continue;
-+            }
- 
--                rc = dblayer_txn_begin(inst->inst_be, NULL, &txn);
--                if (rc) {
--                    slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: transaction begin failed: %d\n", rc);
--                    break;
--                }
--                /*
--                 * https://docs.oracle.com/cd/E17275_01/html/api_reference/C/BDB-C_APIReference.pdf
--                 * "DB_FREELIST_ONLY
--                 * Do no page compaction, only returning pages to the filesystem that are already free and at the end
--                 * of the file. This flag must be set if the database is a Hash access method database."
--                 *
--                 */
-+            /* Time to compact the DB's */
-+            dblayer_force_checkpoint(li);
-+            bdb_compact(li);
-+            dblayer_force_checkpoint(li);
- 
--                uint32_t compact_flags = DB_FREE_SPACE;
--                if (type == DB_HASH) {
--                    compact_flags |= DB_FREELIST_ONLY;
--                }
--                rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/,
--                                 &c_data, compact_flags, NULL /*end*/);
--                if (rc) {
--                    slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain",
--                                  "compactdb: failed to compact %s; db error - %d %s\n",
--                                  inst->inst_name, rc, db_strerror(rc));
--                    if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) {
--                        slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to abort txn (%s) db error - %d %s\n",
--                                      inst->inst_name, rc, db_strerror(rc));
--                        break;
--                    }
--                } else {
--                    slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain",
--                                  "compactdb: compact %s - %d pages freed\n",
--                                  inst->inst_name, c_data.compact_pages_free);
--                    if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) {
--                        slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to commit txn (%s) db error - %d %s\n",
--                                      inst->inst_name, rc, db_strerror(rc));
--                        break;
--                    }
--                }
--            }
-+            /* Now reset the timer and compacting flag */
-             compactdb_interval = compactdb_interval_update;
-             slapi_timespec_expire_at(compactdb_interval, &compactdb_expire);
-+            compacting = PR_FALSE;
-         }
-     }
--    slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Check point before leaving\n");
-+    slapi_log_err(SLAPI_LOG_HOUSE, "checkpoint_threadmain", "Check point before leaving\n");
-     rval = dblayer_force_checkpoint(li);
-+
- error_return:
- 
-     DECR_THREAD_COUNT(pEnv);
--    slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Leaving checkpoint_threadmain\n");
-+    slapi_log_err(SLAPI_LOG_HOUSE, "checkpoint_threadmain", "Leaving checkpoint_threadmain\n");
-     return rval;
- }
- 
-@@ -6209,3 +6201,99 @@ bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info)
- 
-     return rc;
- }
-+
-+int32_t
-+ldbm_back_compact(Slapi_Backend *be)
-+{
-+    struct ldbminfo *li = NULL;
-+    int32_t rc = -1;
-+
-+    li = (struct ldbminfo *)be->be_database->plg_private;
-+    dblayer_force_checkpoint(li);
-+    rc = bdb_compact(li);
-+    dblayer_force_checkpoint(li);
-+    return rc;
-+}
-+
-+
-+int32_t
-+bdb_compact(struct ldbminfo *li)
-+{
-+    Object *inst_obj;
-+    ldbm_instance *inst;
-+    DB *db = NULL;
-+    back_txn txn = {0};
-+    int rc = 0;
-+    DB_COMPACT c_data = {0};
-+
-+    slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact",
-+                  "Compacting databases ...\n");
-+    for (inst_obj = objset_first_obj(li->li_instance_set);
-+        inst_obj;
-+        inst_obj = objset_next_obj(li->li_instance_set, inst_obj))
-+    {
-+        inst = (ldbm_instance *)object_get_data(inst_obj);
-+        rc = dblayer_get_id2entry(inst->inst_be, &db);
-+        if (!db || rc) {
-+            continue;
-+        }
-+        slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting DB start: %s\n",
-+                      inst->inst_name);
-+
-+        /*
-+         * It's possible for this to heap us after free because when we access db
-+         * *just* as the server shut's down, we don't know it. So we should probably
-+         * do something like wrapping access to the db var in a rwlock, and have "read"
-+         * to access, and take writes to change the state. This would prevent the issue.
-+         */
-+        DBTYPE type;
-+        rc = db->get_type(db, &type);
-+        if (rc) {
-+            slapi_log_err(SLAPI_LOG_ERR, "bdb_compact",
-+                          "compactdb: failed to determine db type for %s: db error - %d %s\n",
-+                          inst->inst_name, rc, db_strerror(rc));
-+            continue;
-+        }
-+
-+        rc = dblayer_txn_begin(inst->inst_be, NULL, &txn);
-+        if (rc) {
-+            slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: transaction begin failed: %d\n", rc);
-+            break;
-+        }
-+        /*
-+         * https://docs.oracle.com/cd/E17275_01/html/api_reference/C/BDB-C_APIReference.pdf
-+         * "DB_FREELIST_ONLY
-+         * Do no page compaction, only returning pages to the filesystem that are already free and at the end
-+         * of the file. This flag must be set if the database is a Hash access method database."
-+         *
-+         */
-+        uint32_t compact_flags = DB_FREE_SPACE;
-+        if (type == DB_HASH) {
-+            compact_flags |= DB_FREELIST_ONLY;
-+        }
-+        rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/,
-+                         &c_data, compact_flags, NULL /*end*/);
-+        if (rc) {
-+            slapi_log_err(SLAPI_LOG_ERR, "bdb_compact",
-+                    "compactdb: failed to compact %s; db error - %d %s\n",
-+                    inst->inst_name, rc, db_strerror(rc));
-+            if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) {
-+                slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to abort txn (%s) db error - %d %s\n",
-+                              inst->inst_name, rc, db_strerror(rc));
-+                break;
-+            }
-+        } else {
-+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact",
-+                          "compactdb: compact %s - %d pages freed\n",
-+                          inst->inst_name, c_data.compact_pages_free);
-+            if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) {
-+                slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", "compactdb: failed to commit txn (%s) db error - %d %s\n",
-+                              inst->inst_name, rc, db_strerror(rc));
-+                break;
-+            }
-+        }
-+    }
-+    slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting databases finished.\n");
-+
-+    return rc;
-+}
-diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
-index 6bb04d21a..e3a49dbac 100644
---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
-+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
-@@ -79,7 +79,8 @@ typedef struct bdb_config
-     int bdb_previous_lock_config;  /* Max lock count when we last shut down--
-                                       * used to determine if we delete the mpool */
-     u_int32_t bdb_deadlock_policy; /* i.e. the atype to DB_ENV->lock_detect in deadlock_threadmain */
--    int bdb_compactdb_interval;    /* interval to execute compact id2entry dbs */
-+    int32_t bdb_compactdb_interval; /* interval to execute compact id2entry dbs */
-+    char *bdb_compactdb_time;       /* time of day to execute compact id2entry dbs */
- } bdb_config;
- 
- int bdb_init(struct ldbminfo *li, config_info *config_array);
-@@ -96,6 +97,7 @@ int bdb_db_size(Slapi_PBlock *pb);
- int bdb_upgradedb(Slapi_PBlock *pb);
- int bdb_upgradednformat(Slapi_PBlock *pb);
- int bdb_upgradeddformat(Slapi_PBlock *pb);
-+int32_t bdb_compact(struct ldbminfo *li);
- int bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task);
- int bdb_cleanup(struct ldbminfo *li);
- int bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
-diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
-index 4165c8fad..42c9bd00a 100644
---- a/ldap/servers/slapd/back-ldbm/init.c
-+++ b/ldap/servers/slapd/back-ldbm/init.c
-@@ -180,6 +180,8 @@ ldbm_back_init(Slapi_PBlock *pb)
-                            (void *)ldbm_back_set_info);
-     rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DB_CTRL_INFO_FN,
-                            (void *)ldbm_back_ctrl_info);
-+    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DB_COMPACT_FN,
-+                           (void *)ldbm_back_compact);
- 
-     if (rc != 0) {
-         slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "Failed %d\n", rc);
-diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
-index 6fa8292eb..48446193e 100644
---- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
-+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
-@@ -84,6 +84,7 @@ struct config_info
- #define CONFIG_DB_TRANSACTION_WAIT "nsslapd-db-transaction-wait"
- #define CONFIG_DB_CHECKPOINT_INTERVAL "nsslapd-db-checkpoint-interval"
- #define CONFIG_DB_COMPACTDB_INTERVAL "nsslapd-db-compactdb-interval"
-+#define CONFIG_DB_COMPACTDB_TIME "nsslapd-db-compactdb-time"
- #define CONFIG_DB_TRANSACTION_BATCH "nsslapd-db-transaction-batch-val"
- #define CONFIG_DB_TRANSACTION_BATCH_MIN_SLEEP "nsslapd-db-transaction-batch-min-wait"
- #define CONFIG_DB_TRANSACTION_BATCH_MAX_SLEEP "nsslapd-db-transaction-batch-max-wait"
-diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
-index 5d618a89c..30c9003bf 100644
---- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
-+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
-@@ -478,6 +478,7 @@ void ldbm_back_search_results_release(void **search_results);
- int ldbm_back_init(Slapi_PBlock *pb);
- void ldbm_back_prev_search_results(Slapi_PBlock *pb);
- int ldbm_back_isinitialized(void);
-+int32_t ldbm_back_compact(Slapi_Backend *be);
- 
- /*
-  * vlv.c
-diff --git a/ldap/servers/slapd/filtercmp.c b/ldap/servers/slapd/filtercmp.c
-index f7e3ed4d5..c886267bd 100644
---- a/ldap/servers/slapd/filtercmp.c
-+++ b/ldap/servers/slapd/filtercmp.c
-@@ -344,7 +344,6 @@ slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2)
-     struct berval *inval1[2], *inval2[2], **outval1, **outval2;
-     int ret;
-     Slapi_Attr sattr;
--    int cmplen;
- 
-     slapi_log_err(SLAPI_LOG_TRACE, "slapi_filter_compare", "=>\n");
- 
-@@ -379,11 +378,11 @@ slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2)
-         if (key1 && key2) {
-             struct berval bvkey1 = {
-                 slapi_value_get_length(key1[0]),
--                slapi_value_get_string(key1[0])
-+				(char *)slapi_value_get_string(key1[0])
-             };
-             struct berval bvkey2 = {
-                 slapi_value_get_length(key2[0]),
--                slapi_value_get_string(key2[0])
-+				(char *)slapi_value_get_string(key2[0])
-             };
-             ret = slapi_berval_cmp(&bvkey1, &bvkey2);
-         }
-diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
-index f7d1f8885..fcac53839 100644
---- a/ldap/servers/slapd/pblock.c
-+++ b/ldap/servers/slapd/pblock.c
-@@ -925,6 +925,12 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
-         }
-         (*(IFP *)value) = pblock->pb_plugin->plg_db2ldif;
-         break;
-+    case SLAPI_PLUGIN_DB_COMPACT_FN:
-+        if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
-+            return (-1);
-+        }
-+        (*(IFP *)value) = pblock->pb_plugin->plg_dbcompact;
-+        break;
-     case SLAPI_PLUGIN_DB_DB2INDEX_FN:
-         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
-             return (-1);
-@@ -2925,7 +2931,12 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
-         }
-         pblock->pb_backend->be_noacl = *((int *)value);
-         break;
--
-+    case SLAPI_PLUGIN_DB_COMPACT_FN:
-+        if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
-+            return (-1);
-+        }
-+        pblock->pb_plugin->plg_dbcompact = (IFP)value;
-+        break;
- 
-     /* extendedop plugin functions */
-     case SLAPI_PLUGIN_EXT_OP_FN:
-@@ -4137,8 +4148,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
-         break;
- 
-     case SLAPI_URP_TOMBSTONE_CONFLICT_DN:
--	pblock->pb_intop->pb_urp_tombstone_conflict_dn = (char *)value;
--	break;
-+        pblock->pb_intop->pb_urp_tombstone_conflict_dn = (char *)value;
-+        break;
- 
-     case SLAPI_URP_TOMBSTONE_UNIQUEID:
-         _pblock_assert_pb_intop(pblock);
-diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
-index 3126a65f3..c48516157 100644
---- a/ldap/servers/slapd/slap.h
-+++ b/ldap/servers/slapd/slap.h
-@@ -1041,6 +1041,7 @@ struct slapdplugin
-             IFP plg_un_db_ldif2db;              /* ldif 2 database */
-             IFP plg_un_db_db2ldif;              /* database 2 ldif */
-             IFP plg_un_db_db2index;             /* database 2 index */
-+            IFP plg_un_db_dbcompact;            /* compact database */
-             IFP plg_un_db_archive2db;           /* ldif 2 database */
-             IFP plg_un_db_db2archive;           /* database 2 ldif */
-             IFP plg_un_db_upgradedb;            /* convert old idl to new */
-@@ -1082,6 +1083,7 @@ struct slapdplugin
- #define plg_result plg_un.plg_un_db.plg_un_db_result
- #define plg_ldif2db plg_un.plg_un_db.plg_un_db_ldif2db
- #define plg_db2ldif plg_un.plg_un_db.plg_un_db_db2ldif
-+#define plg_dbcompact plg_un.plg_un_db.plg_un_db_dbcompact
- #define plg_db2index plg_un.plg_un_db.plg_un_db_db2index
- #define plg_archive2db plg_un.plg_un_db.plg_un_db_archive2db
- #define plg_db2archive plg_un.plg_un_db.plg_un_db_db2archive
-diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
-index b956ebe63..570765e47 100644
---- a/ldap/servers/slapd/slapi-private.h
-+++ b/ldap/servers/slapd/slapi-private.h
-@@ -928,6 +928,7 @@ int proxyauth_get_dn(Slapi_PBlock *pb, char **proxydnp, char **errtextp);
- #define SLAPI_PLUGIN_DB_GET_INFO_FN               290
- #define SLAPI_PLUGIN_DB_SET_INFO_FN               291
- #define SLAPI_PLUGIN_DB_CTRL_INFO_FN              292
-+#define SLAPI_PLUGIN_DB_COMPACT_FN                294
- 
- /**** End of database plugin interface. **************************************/
- 
-diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
-index 93d31b806..4c7262ab3 100644
---- a/ldap/servers/slapd/task.c
-+++ b/ldap/servers/slapd/task.c
-@@ -1,6 +1,6 @@
- /** BEGIN COPYRIGHT BLOCK
-  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
-- * Copyright (C) 2005 Red Hat, Inc.
-+ * Copyright (C) 2021 Red Hat, Inc.
-  * All rights reserved.
-  *
-  * License: GPL (version 3 or any later version).
-@@ -2928,6 +2928,105 @@ des2aes_task_destructor(Slapi_Task *task)
-                   "des2aes_task_destructor <--\n");
- }
- 
-+struct task_compact_data
-+{
-+    char *suffix;
-+    Slapi_Task *task;
-+};
-+
-+static void
-+compact_db_task_destructor(Slapi_Task *task)
-+{
-+    slapi_log_err(SLAPI_LOG_PLUGIN, "compact db task",
-+                  "compact_db_task_destructor -->\n");
-+    if (task) {
-+        struct task_compact_data *mydata = (struct task_compact_data *)slapi_task_get_data(task);
-+        while (slapi_task_get_refcount(task) > 0) {
-+            /* Yield to wait for the task to finish */
-+            DS_Sleep(PR_MillisecondsToInterval(100));
-+        }
-+        if (mydata) {
-+            slapi_ch_free((void **)&mydata);
-+        }
-+    }
-+    slapi_log_err(SLAPI_LOG_PLUGIN, "compact db task",
-+                  "compact_db_task_destructor <--\n");
-+}
-+
-+static void
-+task_compact_thread(void *arg)
-+{
-+    struct task_compact_data *task_data = arg;
-+    Slapi_Task *task = task_data->task;
-+    Slapi_Backend *be = NULL;
-+    char *cookie = NULL;
-+    int32_t rc = -1;
-+
-+    slapi_task_inc_refcount(task);
-+    slapi_task_begin(task, 1);
-+
-+    be = slapi_get_first_backend(&cookie);
-+    while (be) {
-+        if (be->be_private == 0) {
-+            /* Found a non-private backend, start compacting */
-+            rc = (be->be_database->plg_dbcompact)(be);
-+            break;
-+        }
-+        be = (backend *)slapi_get_next_backend(cookie);
-+    }
-+    slapi_ch_free_string(&cookie);
-+
-+    slapi_task_finish(task, rc);
-+    slapi_task_dec_refcount(task);
-+}
-+
-+/*
-+ * compact the BDB database
-+ *
-+ *  dn: cn=compact_it,cn=compact db,cn=tasks,cn=config
-+ *  objectclass: top
-+ *  objectclass: extensibleObject
-+ *  cn: compact_it
-+ */
-+static int
-+task_compact_db_add(Slapi_PBlock *pb,
-+                    Slapi_Entry *e,
-+                    Slapi_Entry *eAfter __attribute__((unused)),
-+                    int *returncode,
-+                    char *returntext,
-+                    void *arg __attribute__((unused)))
-+{
-+    Slapi_Task *task = slapi_new_task(slapi_entry_get_ndn(e));
-+    struct task_compact_data *task_data = NULL;
-+    PRThread *thread = NULL;
-+
-+    slapi_task_log_notice(task, "Beginning database compaction task...\n");
-+
-+    /* Register our destructor for cleaning up our private data */
-+    slapi_task_set_destructor_fn(task, compact_db_task_destructor);
-+
-+    task_data = (struct task_compact_data *)slapi_ch_calloc(1, sizeof(struct task_compact_data));
-+    task_data->task = task;
-+    slapi_task_set_data(task, task_data);
-+
-+    /* Start the compaction as a separate thread */
-+    thread = PR_CreateThread(PR_USER_THREAD, task_compact_thread,
-+             (void *)task_data, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
-+             PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE);
-+    if (thread == NULL) {
-+        slapi_log_err(SLAPI_LOG_ERR, "task_compact_db_add", "Unable to create db compact thread!\n");
-+        *returncode = LDAP_OPERATIONS_ERROR;
-+        slapi_ch_free((void **)&task_data);
-+    }
-+
-+    if (*returncode != LDAP_SUCCESS) {
-+        slapi_task_finish(task, *returncode);
-+        return SLAPI_DSE_CALLBACK_ERROR;
-+    }
-+
-+    return SLAPI_DSE_CALLBACK_OK;
-+}
-+
- /* cleanup old tasks that may still be in the DSE from a previous session
-  * (this can happen if the server crashes [no matter how unlikely we like
-  * to think that is].)
-@@ -3010,6 +3109,7 @@ task_init(void)
-     slapi_task_register_handler("sysconfig reload", task_sysconfig_reload_add);
-     slapi_task_register_handler("fixup tombstones", task_fixup_tombstones_add);
-     slapi_task_register_handler("des2aes", task_des2aes);
-+    slapi_task_register_handler("compact db", task_compact_db_add);
- }
- 
- /* called when the server is shutting down -- abort all existing tasks */
-diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
-index 11cae972c..b73dc8460 100644
---- a/src/cockpit/389-console/src/database.jsx
-+++ b/src/cockpit/389-console/src/database.jsx
-@@ -196,6 +196,7 @@ export class Database extends React.Component {
-                                     dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'],
-                                     chxpoint: attrs['nsslapd-db-checkpoint-interval'],
-                                     compactinterval: attrs['nsslapd-db-compactdb-interval'],
-+                                    compacttime: attrs['nsslapd-db-compactdb-time'],
-                                     importcacheauto: attrs['nsslapd-import-cache-autosize'],
-                                     importcachesize: attrs['nsslapd-import-cachesize'],
-                                 },
-diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
-index 6a71c138d..1fa9f2cc2 100644
---- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
-+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
-@@ -36,6 +36,7 @@ export class GlobalDatabaseConfig extends React.Component {
-             dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
-             chxpoint: this.props.data.chxpoint,
-             compactinterval: this.props.data.compactinterval,
-+            compacttime: this.props.data.compacttime,
-             importcachesize: this.props.data.importcachesize,
-             importcacheauto: this.props.data.importcacheauto,
-             // These variables store the original value (used for saving config)
-@@ -55,6 +56,7 @@ export class GlobalDatabaseConfig extends React.Component {
-             _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
-             _chxpoint: this.props.data.chxpoint,
-             _compactinterval: this.props.data.compactinterval,
-+            _compacttime: this.props.data.compacttime,
-             _importcachesize: this.props.data.importcachesize,
-             _importcacheauto: this.props.data.importcacheauto,
-             _db_cache_auto: this.props.data.db_cache_auto,
-@@ -186,6 +188,10 @@ export class GlobalDatabaseConfig extends React.Component {
-             cmd.push("--compactdb-interval=" + this.state.compactinterval);
-             requireRestart = true;
-         }
-+        if (this.state._compacttime != this.state.compacttime) {
-+            cmd.push("--compactdb-time=" + this.state.compacttime);
-+            requireRestart = true;
-+        }
-         if (this.state.import_cache_auto) {
-             // Auto cache is selected
-             if (this.state._import_cache_auto != this.state.import_cache_auto) {
-@@ -485,7 +491,15 @@ export class GlobalDatabaseConfig extends React.Component {
-                                             Database Compact Interval
-                                         </Col>
-                                         <Col sm={8}>
--                                            <input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="text" />
-+                                            <input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="number" />
-+                                        </Col>
-+                                    </Row>
-+                                    <Row className="ds-margin-top" title="The Time Of Day to perform the database compaction after the compact interval has been met.  Uses the format: 'HH:MM' and defaults to '23:59'. (nsslapd-db-compactdb-time)">
-+                                        <Col componentClass={ControlLabel} sm={4}>
-+                                            Database Compact Time
-+                                        </Col>
-+                                        <Col sm={8}>
-+                                            <input id="compacttime" value={this.state.compacttime} onChange={this.handleChange} className="ds-input-auto" type="number" />
-                                         </Col>
-                                     </Row>
-                                     <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
-diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py
-index c184c8d4f..d6161cebb 100644
---- a/src/lib389/lib389/_constants.py
-+++ b/src/lib389/lib389/_constants.py
-@@ -154,6 +154,7 @@ DN_EUUID_TASK = "cn=entryuuid task,%s" % DN_TASKS
- DN_TOMB_FIXUP_TASK = "cn=fixup tombstones,%s" % DN_TASKS
- DN_FIXUP_LINKED_ATTIBUTES = "cn=fixup linked attributes,%s" % DN_TASKS
- DN_AUTOMEMBER_REBUILD_TASK = "cn=automember rebuild membership,%s" % DN_TASKS
-+DN_COMPACTDB_TASK = "cn=compact db,%s" % DN_TASKS
- 
- # Script Constants
- LDIF2DB = 'ldif2db'
-diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
-index 13bb27842..ad78a6ffe 100644
---- a/src/lib389/lib389/backend.py
-+++ b/src/lib389/lib389/backend.py
-@@ -1005,6 +1005,7 @@ class DatabaseConfig(DSLdapObject):
-                     'nsslapd-db-transaction-wait',
-                     'nsslapd-db-checkpoint-interval',
-                     'nsslapd-db-compactdb-interval',
-+                    'nsslapd-db-compactdb-time',
-                     'nsslapd-db-page-size',
-                     'nsslapd-db-transaction-batch-val',
-                     'nsslapd-db-transaction-batch-min-wait',
-diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
-index 722764d10..7b2f32c23 100644
---- a/src/lib389/lib389/cli_conf/backend.py
-+++ b/src/lib389/lib389/cli_conf/backend.py
-@@ -1,5 +1,5 @@
- # --- BEGIN COPYRIGHT BLOCK ---
--# Copyright (C) 2020 Red Hat, Inc.
-+# Copyright (C) 2021 Red Hat, Inc.
- # Copyright (C) 2019 William Brown <william@blackhats.net.au>
- # All rights reserved.
- #
-@@ -19,6 +19,7 @@ from lib389.chaining import (ChainingLinks)
- from lib389.monitor import MonitorLDBM
- from lib389.replica import Replicas
- from lib389.utils import ensure_str, is_a_dn, is_dn_parent
-+from lib389.tasks import DBCompactTask
- from lib389._constants import *
- from lib389.cli_base import (
-     _format_status,
-@@ -41,6 +42,7 @@ arg_to_attr = {
-         'txn_wait': 'nsslapd-db-transaction-wait',
-         'checkpoint_interval': 'nsslapd-db-checkpoint-interval',
-         'compactdb_interval': 'nsslapd-db-compactdb-interval',
-+        'compactdb_time': 'nsslapd-db-compactdb-time',
-         'txn_batch_val': 'nsslapd-db-transaction-batch-val',
-         'txn_batch_min': 'nsslapd-db-transaction-batch-min-wait',
-         'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait',
-@@ -789,6 +791,18 @@ def backend_reindex_vlv(inst, basedn, log, args):
-     log.info("Successfully reindexed VLV indexes")
- 
- 
-+def backend_compact(inst, basedn, log, args):
-+    task = DBCompactTask(inst)
-+    task_properties = {}
-+    if args.only_changelog:
-+        task_properties = {'justChangelog': 'yes'}
-+    task.create(properties=task_properties)
-+    task.wait()
-+    if task.get_exit_code() != 0:
-+        raise ValueError("Failed to create Database Compaction Task")
-+    log.info("Successfully started Database Compaction Task")
-+
-+
- def create_parser(subparsers):
-     backend_parser = subparsers.add_parser('backend', help="Manage database suffixes and backends")
-     subcommands = backend_parser.add_subparsers(help="action")
-@@ -994,6 +1008,7 @@ def create_parser(subparsers):
-     set_db_config_parser.add_argument('--checkpoint-interval', help='Sets the amount of time in seconds after which the Directory Server sends a '
-                                                                     'checkpoint entry to the database transaction log')
-     set_db_config_parser.add_argument('--compactdb-interval', help='Sets the interval in seconds when the database is compacted')
-+    set_db_config_parser.add_argument('--compactdb-time', help='Sets the Time Of Day to compact the database after the "compactdb interval" has been reached:  Use this format to set the hour and minute: HH:MM')
-     set_db_config_parser.add_argument('--txn-batch-val', help='Specifies how many transactions will be batched before being committed')
-     set_db_config_parser.add_argument('--txn-batch-min', help='Controls when transactions should be flushed earliest, independently of '
-                                                               'the batch count (only works when txn-batch-val is set)')
-@@ -1121,3 +1136,10 @@ def create_parser(subparsers):
-     #######################################################
-     get_tree_parser = subcommands.add_parser('get-tree', help='Get a representation of the suffix tree')
-     get_tree_parser.set_defaults(func=backend_get_tree)
-+
-+    #######################################################
-+    # Run the db compaction task
-+    #######################################################
-+    compact_parser = subcommands.add_parser('compact-db', help='Compact the database and the replication changelog')
-+    compact_parser.set_defaults(func=backend_compact)
-+    compact_parser.add_argument('--only-changelog', action='store_true', help='Only compact the Replication Change Log')
-diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
-index 04886f632..3478a0a1f 100644
---- a/src/lib389/lib389/cli_conf/replication.py
-+++ b/src/lib389/lib389/cli_conf/replication.py
-@@ -37,6 +37,7 @@ arg_to_attr = {
-         'max_entries': 'nsslapd-changelogmaxentries',
-         'max_age': 'nsslapd-changelogmaxage',
-         'compact_interval': 'nsslapd-changelogcompactdb-interval',
-+        'compact_time': 'nsslapd-changelogcompactdb-time',
-         'trim_interval': 'nsslapd-changelogtrim-interval',
-         'encrypt_algo': 'nsslapd-encryptionalgorithm',
-         'encrypt_key': 'nssymmetrickey',
-@@ -1216,6 +1217,8 @@ def create_parser(subparsers):
-     repl_set_cl.add_argument('--max-entries', help="The maximum number of entries to get in the replication changelog")
-     repl_set_cl.add_argument('--max-age', help="The maximum age of a replication changelog entry")
-     repl_set_cl.add_argument('--compact-interval', help="The replication changelog compaction interval")
-+    repl_set_cl.add_argument('--compact-time', help='Sets the Time Of Day to compact the database after the changelog "compact interval" '
-+                                                    'has been reached:  Use this format to set the hour and minute: HH:MM')
-     repl_set_cl.add_argument('--trim-interval', help="The interval to check if the replication changelog can be trimmed")
- 
-     repl_get_cl = repl_subcommands.add_parser('get-changelog', help='Display replication changelog attributes.')
-diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
-index 590c6ee79..b64bc6ce5 100644
---- a/src/lib389/lib389/tasks.py
-+++ b/src/lib389/lib389/tasks.py
-@@ -217,6 +217,19 @@ class EntryUUIDFixupTask(Task):
-         self._must_attributes.extend(['basedn'])
- 
- 
-+class DBCompactTask(Task):
-+    """A single instance of compactdb task entry
-+
-+    :param instance: An instance
-+    :type instance: lib389.DirSrv
-+    """
-+
-+    def __init__(self, instance, dn=None):
-+        self.cn = 'compact_db_' + Task._get_task_date()
-+        dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
-+        super(DBCompactTask, self).__init__(instance, dn)
-+
-+
- class SchemaReloadTask(Task):
-     """A single instance of schema reload task entry
- 
-@@ -227,7 +240,6 @@ class SchemaReloadTask(Task):
-     def __init__(self, instance, dn=None):
-         self.cn = 'schema_reload_' + Task._get_task_date()
-         dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
--
-         super(SchemaReloadTask, self).__init__(instance, dn)
- 
- 
--- 
-2.26.3
-
diff --git a/SOURCES/0011-Issue-4978-use-more-portable-python-command-for-chec.patch b/SOURCES/0011-Issue-4978-use-more-portable-python-command-for-chec.patch
new file mode 100644
index 0000000..1414b9d
--- /dev/null
+++ b/SOURCES/0011-Issue-4978-use-more-portable-python-command-for-chec.patch
@@ -0,0 +1,32 @@
+From 60d570e52465b58167301f64792f5f85cbc85e20 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Wed, 10 Nov 2021 08:53:45 -0500
+Subject: [PATCH 11/12] Issue 4978 - use more portable python command for
+ checking containers
+
+Description:  During the installation check for containers use arguments
+              for subprocess.run() that work on all versions of python
+
+relates: https://github.com/389ds/389-ds-base/issues/4978
+
+Reviewed by: mreynolds(one line commit rule)
+---
+ src/lib389/lib389/instance/setup.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
+index 7b0147cf9..b23d2deb8 100644
+--- a/src/lib389/lib389/instance/setup.py
++++ b/src/lib389/lib389/instance/setup.py
+@@ -734,7 +734,7 @@ class SetupDs(object):
+         # Check if we are in a container, if so don't use /dev/shm for the db home dir
+         # as containers typically don't allocate enough space for dev/shm and we don't
+         # want to unexpectedly break the server after an upgrade
+-        container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
++        container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE)
+         if container_result.returncode == 0:
+             # In a container, set the db_home_dir to the db path
+             self.log.debug("Container detected setting db home directory to db directory.")
+-- 
+2.31.1
+
diff --git a/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch b/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch
deleted file mode 100644
index 94618f6..0000000
--- a/SOURCES/0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch
+++ /dev/null
@@ -1,155 +0,0 @@
-From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Sat, 29 May 2021 13:19:53 -0400
-Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in
- 1.4.3
-
-Description:  In 1.4.3 the replication changelog is a separate database,
-              so it needs a separate "nsds5task" compaction task (COMPACT_CL5)
-
-relates: https://github.com/389ds/389-ds-base/issues/4778
-
-ASAN tested and approved
-
-Reviewed by: mreynolds
----
- ldap/servers/plugins/replication/cl5_api.c    | 21 +++++++++----------
- ldap/servers/plugins/replication/cl5_api.h    |  1 +
- .../replication/repl5_replica_config.c        |  9 +++++++-
- 3 files changed, 19 insertions(+), 12 deletions(-)
-
-diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
-index 75a2f46f5..4c5077b48 100644
---- a/ldap/servers/plugins/replication/cl5_api.c
-+++ b/ldap/servers/plugins/replication/cl5_api.c
-@@ -266,7 +266,6 @@ static int _cl5TrimInit(void);
- static void _cl5TrimCleanup(void);
- static int _cl5TrimMain(void *param);
- static void _cl5DoTrimming(void);
--static void _cl5CompactDBs(void);
- static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid);
- static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
- static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key);
-@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused)))
-             if (slapi_current_utc_time() > compactdb_time) {
- 				/* time to trim */
- 				timeCompactPrev = timeNow;
--				_cl5CompactDBs();
-+				cl5CompactDBs();
- 				compacting = PR_FALSE;
-             }
-         }
-@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data)
- }
- 
- /* clear free page files to reduce changelog */
--static void
--_cl5CompactDBs(void)
-+void
-+cl5CompactDBs(void)
- {
-     int rc;
-     Object *fileObj = NULL;
-@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void)
-     rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0);
-     if (rc) {
-         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
--                      "_cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
-+                      "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
-                       rc, db_strerror(rc));
-         goto bail;
-     }
- 
- 
-     slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
--                  "_cl5CompactDBs - compacting replication changelogs...\n");
-+                  "cl5CompactDBs - compacting replication changelogs...\n");
-     for (fileObj = objset_first_obj(s_cl5Desc.dbFiles);
-          fileObj;
-          fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) {
-@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void)
-                          &c_data, DB_FREE_SPACE, NULL /*end*/);
-         if (rc) {
-             slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
--                          "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
-+                          "cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
-                           dbFile->replName, rc, db_strerror(rc));
-             goto bail;
-         }
-         slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
--                      "_cl5CompactDBs - %s - %d pages freed\n",
-+                      "cl5CompactDBs - %s - %d pages freed\n",
-                       dbFile->replName, c_data.compact_pages_free);
-     }
- 
-     slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
--                  "_cl5CompactDBs - compacting replication changelogs finished.\n");
-+                  "cl5CompactDBs - compacting replication changelogs finished.\n");
- bail:
-     if (fileObj) {
-         object_release(fileObj);
-@@ -3303,14 +3302,14 @@ bail:
-         rc = TXN_ABORT(txnid);
-         if (rc) {
-             slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
--                          "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
-+                          "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
-                           rc, db_strerror(rc));
-         }
-     } else {
-         rc = TXN_COMMIT(txnid);
-         if (rc) {
-             slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
--                          "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
-+                          "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
-                           rc, db_strerror(rc));
-         }
-     }
-diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
-index 4b0949fb3..11db771f2 100644
---- a/ldap/servers/plugins/replication/cl5_api.h
-+++ b/ldap/servers/plugins/replication/cl5_api.h
-@@ -405,5 +405,6 @@ int cl5DeleteRUV(void);
- void cl5CleanRUV(ReplicaId rid);
- void cl5NotifyCleanup(int rid);
- void trigger_cl_purging(cleanruv_purge_data *purge_data);
-+void cl5CompactDBs(void);
- 
- #endif
-diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
-index a969ef82f..e708a1ccb 100644
---- a/ldap/servers/plugins/replication/repl5_replica_config.c
-+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
-@@ -29,6 +29,8 @@
- #define CLEANRUVLEN 8
- #define CLEANALLRUV "CLEANALLRUV"
- #define CLEANALLRUVLEN 11
-+#define COMPACT_CL5 "COMPACT_CL5"
-+#define COMPACT_CL5_LEN 11
- #define REPLICA_RDN "cn=replica"
- 
- #define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */
-@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext
- static int
- replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods)
- {
--
-     if (strcasecmp(task_name, CL2LDIF_TASK) == 0) {
-         if (apply_mods) {
-             return replica_execute_cl2ldif_task(r, returntext);
-@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap
-             return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext);
-         } else
-             return LDAP_SUCCESS;
-+    } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) {
-+        /* compact the replication changelogs */
-+        if (apply_mods) {
-+            cl5CompactDBs();
-+        }
-+        return LDAP_SUCCESS;
-     } else {
-         PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name);
-         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
--- 
-2.26.3
-
diff --git a/SOURCES/0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch b/SOURCES/0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch
new file mode 100644
index 0000000..5600d8c
--- /dev/null
+++ b/SOURCES/0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch
@@ -0,0 +1,31 @@
+From 2c6653edef793d46815e6df607e55d68e14fe232 Mon Sep 17 00:00:00 2001
+From: spike <spike@fedoraproject.org>
+Date: Fri, 5 Nov 2021 13:56:41 +0100
+Subject: [PATCH 12/12] Issue 4959 - Invalid /etc/hosts setup can cause
+ isLocalHost to fail.
+
+Description: Use local_simple_allocate in dsctl so that isLocal is always set properly
+
+Relates: https://github.com/389ds/389-ds-base/issues/4959
+
+Reviewed by: @droideck (Thanks!)
+---
+ src/lib389/cli/dsctl | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
+index b6c42b5cc..d2ea6cd29 100755
+--- a/src/lib389/cli/dsctl
++++ b/src/lib389/cli/dsctl
+@@ -135,7 +135,7 @@ if __name__ == '__main__':
+         log.error("Unable to access instance information. Are you running as the correct user? (usually dirsrv or root)")
+         sys.exit(1)
+ 
+-    inst.allocate(insts[0])
++    inst.local_simple_allocate(insts[0]['server-id'])
+     log.debug('Instance allocated')
+ 
+     try:
+-- 
+2.31.1
+
diff --git a/SOURCES/0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch b/SOURCES/0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch
new file mode 100644
index 0000000..b7da4bf
--- /dev/null
+++ b/SOURCES/0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch
@@ -0,0 +1,105 @@
+From d000349089eb15b3476ec302f4279f118336290e Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Thu, 16 Dec 2021 16:13:08 -0500
+Subject: [PATCH 1/2] CVE-2021-4091 (BZ#2030367) double-free of the virtual
+ attribute context in persistent search
+
+description:
+	A search is processed by a worker using a private pblock.
+	If the search is persistent, the worker spawn a thread
+	and kind of duplicate its private pblock so that the spawn
+        thread continue to process the persistent search.
+	Then worker ends the initial search, reinit (free) its private pblock,
+        and returns monitoring the wait_queue.
+	When the persistent search completes, it frees the duplicated
+	pblock.
+	The problem is that private pblock and duplicated pblock
+        are referring to a same structure (pb_vattr_context).
+        That lead to a double free
+
+Fix:
+	When cloning the pblock (slapi_pblock_clone) make sure
+	to transfert the references inside the original (private)
+	pblock to the target (cloned) one
+        That includes pb_vattr_context pointer.
+
+Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
+---
+ ldap/servers/slapd/connection.c |  8 +++++---
+ ldap/servers/slapd/pblock.c     | 14 ++++++++++++--
+ 2 files changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
+index e0c1a52d2..fc7ed9c4a 100644
+--- a/ldap/servers/slapd/connection.c
++++ b/ldap/servers/slapd/connection.c
+@@ -1823,9 +1823,11 @@ connection_threadmain()
+                 pthread_mutex_unlock(&(conn->c_mutex));
+             }
+             /* ps_add makes a shallow copy of the pb - so we
+-                 * can't free it or init it here - just set operation to NULL.
+-                 * ps_send_results will call connection_remove_operation_ext to free it
+-                 */
++             * can't free it or init it here - just set operation to NULL.
++             * ps_send_results will call connection_remove_operation_ext to free it
++             * The connection_thread private pblock ('pb') has be cloned and should only
++             * be reinit (slapi_pblock_init)
++             */
+             slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
+             slapi_pblock_init(pb);
+         } else {
+diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
+index a64986aeb..c78d1250f 100644
+--- a/ldap/servers/slapd/pblock.c
++++ b/ldap/servers/slapd/pblock.c
+@@ -292,6 +292,12 @@ _pblock_assert_pb_deprecated(Slapi_PBlock *pblock)
+     }
+ }
+ 
++/* It clones the pblock
++ * the content of the source pblock is transfered
++ * to the target pblock (returned)
++ * The source pblock should not be used for any operation
++ * it needs to be reinit (slapi_pblock_init)
++ */
+ Slapi_PBlock *
+ slapi_pblock_clone(Slapi_PBlock *pb)
+ {
+@@ -312,28 +318,32 @@ slapi_pblock_clone(Slapi_PBlock *pb)
+     if (pb->pb_task != NULL) {
+         _pblock_assert_pb_task(new_pb);
+         *(new_pb->pb_task) = *(pb->pb_task);
++        memset(pb->pb_task, 0, sizeof(slapi_pblock_task));
+     }
+     if (pb->pb_mr != NULL) {
+         _pblock_assert_pb_mr(new_pb);
+         *(new_pb->pb_mr) = *(pb->pb_mr);
++        memset(pb->pb_mr, 0, sizeof(slapi_pblock_matching_rule));
+     }
+     if (pb->pb_misc != NULL) {
+         _pblock_assert_pb_misc(new_pb);
+         *(new_pb->pb_misc) = *(pb->pb_misc);
++        memset(pb->pb_misc, 0, sizeof(slapi_pblock_misc));
+     }
+     if (pb->pb_intop != NULL) {
+         _pblock_assert_pb_intop(new_pb);
+         *(new_pb->pb_intop) = *(pb->pb_intop);
+-        /* set pwdpolicy to NULL so this clone allocates its own policy */
+-        new_pb->pb_intop->pwdpolicy = NULL;
++        memset(pb->pb_intop, 0, sizeof(slapi_pblock_intop));
+     }
+     if (pb->pb_intplugin != NULL) {
+         _pblock_assert_pb_intplugin(new_pb);
+         *(new_pb->pb_intplugin) = *(pb->pb_intplugin);
++        memset(pb->pb_intplugin, 0,sizeof(slapi_pblock_intplugin));
+     }
+     if (pb->pb_deprecated != NULL) {
+         _pblock_assert_pb_deprecated(new_pb);
+         *(new_pb->pb_deprecated) = *(pb->pb_deprecated);
++        memset(pb->pb_deprecated, 0, sizeof(slapi_pblock_deprecated));
+     }
+ #ifdef PBLOCK_ANALYTICS
+     new_pb->analytics = NULL;
+-- 
+2.31.1
+
diff --git a/SOURCES/0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch b/SOURCES/0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
deleted file mode 100644
index db28cfa..0000000
--- a/SOURCES/0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From bc41bbb89405b2059b80e344b2d4c59ae39aabe6 Mon Sep 17 00:00:00 2001
-From: tbordaz <tbordaz@redhat.com>
-Date: Thu, 10 Jun 2021 15:03:27 +0200
-Subject: [PATCH 1/3] Issue 4797 - ACL IP ADDRESS evaluation may corrupt
- c_isreplication_session connection flags (#4799)
-
-Bug description:
-	The fix for ticket #3764 was broken with a missing break in a
-	switch. The consequence is that while setting the client IP
-	address in the pblock (SLAPI_CONN_CLIENTNETADDR_ACLIP), the
-	connection is erroneously set as replication connection.
-        This can lead to crash or failure of testcase
-        test_access_from_certain_network_only_ip.
-        This bug was quite hidden until the fix for #4764 is
-        showing it more frequently
-
-Fix description:
-	Add the missing break
-
-relates: https://github.com/389ds/389-ds-base/issues/4797
-
-Reviewed by: Mark Reynolds
-
-Platforms tested: F33
----
- ldap/servers/slapd/pblock.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
-index fcac53839..a64986aeb 100644
---- a/ldap/servers/slapd/pblock.c
-+++ b/ldap/servers/slapd/pblock.c
-@@ -2595,7 +2595,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
-         pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value);
-         pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
-         break;
--	case SLAPI_CONN_CLIENTNETADDR_ACLIP:
-+    case SLAPI_CONN_CLIENTNETADDR_ACLIP:
-         if (pblock->pb_conn == NULL) {
-             break;
-         }
-@@ -2603,6 +2603,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
-         slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip);
-         pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value;
-         pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
-+        break;
-     case SLAPI_CONN_IS_REPLICATION_SESSION:
-         if (pblock->pb_conn == NULL) {
-             slapi_log_err(SLAPI_LOG_ERR,
--- 
-2.31.1
-
diff --git a/SOURCES/0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch b/SOURCES/0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch
deleted file mode 100644
index eb16fcb..0000000
--- a/SOURCES/0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From b3170e39519530c39d59202413b20e6bd466224d Mon Sep 17 00:00:00 2001
-From: James Chapman <jachapma@redhat.com>
-Date: Wed, 27 Jan 2021 09:56:38 +0000
-Subject: [PATCH 2/3] Issue 4396 - Minor memory leak in backend (#4558) (#4572)
-
-Bug Description: As multiple suffixes per backend were no longer used, this
-functionality has been replaced with a single suffix per backend. Legacy
-code remains that adds multiple suffixes to the dse internal backend,
-resulting in memory allocations that are lost.
-
-Also a minor typo is corrected in backend.c
-
-Fix Description: Calls to be_addsuffix on the DSE backend are removed
-as they are never used.
-
-Fixes: https://github.com/389ds/389-ds-base/issues/4396
-
-Reviewed by: mreynolds389, Firstyear, droideck (Thank you)
----
- ldap/servers/slapd/backend.c |  2 +-
- ldap/servers/slapd/fedse.c   | 12 +++---------
- 2 files changed, 4 insertions(+), 10 deletions(-)
-
-diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
-index bc52b4643..5707504a9 100644
---- a/ldap/servers/slapd/backend.c
-+++ b/ldap/servers/slapd/backend.c
-@@ -42,7 +42,7 @@ be_init(Slapi_Backend *be, const char *type, const char *name, int isprivate, in
-     }
-     be->be_monitordn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config",
-                                               name, type);
--    if (NULL == be->be_configdn) {
-+    if (NULL == be->be_monitordn) {
-         slapi_log_err(SLAPI_LOG_ERR,
-                       "be_init", "Failed create instance monitor dn for "
-                                  "plugin %s, instance %s\n",
-diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
-index 0d645f909..7b820b540 100644
---- a/ldap/servers/slapd/fedse.c
-+++ b/ldap/servers/slapd/fedse.c
-@@ -2827,7 +2827,7 @@ search_snmp(Slapi_PBlock *pb __attribute__((unused)),
- }
- 
- /*
-- * Called from config.c to install the internal backends
-+ * Called from main.c to install the internal backends
-  */
- int
- setup_internal_backends(char *configdir)
-@@ -2846,7 +2846,6 @@ setup_internal_backends(char *configdir)
-         Slapi_DN counters;
-         Slapi_DN snmp;
-         Slapi_DN root;
--        Slapi_Backend *be;
-         Slapi_DN encryption;
-         Slapi_DN saslmapping;
-         Slapi_DN plugins;
-@@ -2895,16 +2894,11 @@ setup_internal_backends(char *configdir)
-         dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
-         dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
- 
--        be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
--        be_addsuffix(be, &root);
--        be_addsuffix(be, &monitor);
--        be_addsuffix(be, &config);
-+        be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
- 
-         /*
--         * Now that the be's are in place, we can
--         * setup the mapping tree.
-+         * Now that the be's are in place, we can setup the mapping tree.
-          */
--
-         if (mapping_tree_init()) {
-             slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n");
-             exit(1);
--- 
-2.31.1
-
diff --git a/SOURCES/0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch b/SOURCES/0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch
new file mode 100644
index 0000000..7097431
--- /dev/null
+++ b/SOURCES/0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch
@@ -0,0 +1,102 @@
+From 03ca5111a8de602ecef9ad33206ba593b242d0df Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 21 Jan 2022 10:15:35 -0500
+Subject: [PATCH 1/2] Issue 5127 - run restorecon on /dev/shm at server startup
+
+Description:
+
+Update the systemd service file to execute a script that runs
+restorecon on the DB home directory.  This addresses issues with
+backup/restore, reboot, and FS restore issues that can happen when
+/dev/shm is missing or created outside of dscreate.
+
+relates: https://github.com/389ds/389-ds-base/issues/5127
+
+Reviewed by: progier & viktor (Thanks!!)
+---
+ Makefile.am                          |  2 +-
+ rpm/389-ds-base.spec.in              |  1 +
+ wrappers/ds_selinux_restorecon.sh.in | 33 ++++++++++++++++++++++++++++
+ wrappers/systemd.template.service.in |  1 +
+ 4 files changed, 36 insertions(+), 1 deletion(-)
+ create mode 100644 wrappers/ds_selinux_restorecon.sh.in
+
+diff --git a/Makefile.am b/Makefile.am
+index fc5a6a7d1..d6ad273c3 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -775,7 +775,7 @@ libexec_SCRIPTS += ldap/admin/src/scripts/ds_selinux_enabled \
+ 	ldap/admin/src/scripts/ds_selinux_port_query
+ endif
+ if SYSTEMD
+-libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl
++libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl wrappers/ds_selinux_restorecon.sh
+ endif
+ 
+ install-data-hook:
+diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
+index d80de8422..6c0d95abd 100644
+--- a/rpm/389-ds-base.spec.in
++++ b/rpm/389-ds-base.spec.in
+@@ -623,6 +623,7 @@ exit 0
+ %{_sbindir}/ns-slapd
+ %{_mandir}/man8/ns-slapd.8.gz
+ %{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
++%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
+ %{_mandir}/man5/99user.ldif.5.gz
+ %{_mandir}/man5/certmap.conf.5.gz
+ %{_mandir}/man5/slapd-collations.conf.5.gz
+diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
+new file mode 100644
+index 000000000..063347de3
+--- /dev/null
++++ b/wrappers/ds_selinux_restorecon.sh.in
+@@ -0,0 +1,33 @@
++#!/bin/sh
++# BEGIN COPYRIGHT BLOCK
++# Copyright (C) 2022 Red Hat, Inc.
++#
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
++# END COPYRIGHT BLOCK
++
++# Make sure we have the path to the dse.ldif
++if [ -z $1 ]
++then
++    echo "usage: ${0} /etc/dirsrv/slapd-<instance>/dse.ldif"
++    exit 0
++fi
++
++if ! command -v restorecon &> /dev/null
++then
++    # restorecon is not available
++    exit 0
++fi
++
++# Grep the db_home_dir out of the config file
++DS_HOME_DIR=`grep 'nsslapd-db-home-directory: ' $1 | awk '{print $2}'`
++if [ -z "$DS_HOME_DIR" ]
++then
++    # No DB home set, that's ok
++    exit 0
++fi
++
++# Now run restorecon
++restorecon ${DS_HOME_DIR}
+diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
+index a8c21a9be..4485e0ec0 100644
+--- a/wrappers/systemd.template.service.in
++++ b/wrappers/systemd.template.service.in
+@@ -14,6 +14,7 @@ EnvironmentFile=-@initconfigdir@/@package_name@
+ EnvironmentFile=-@initconfigdir@/@package_name@-%i
+ PIDFile=/run/@package_name@/slapd-%i.pid
+ ExecStartPre=@libexecdir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif
++ExecStartPre=@libexecdir@/ds_selinux_restorecon.sh @instconfigdir@/slapd-%i/dse.ldif
+ ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i /run/@package_name@/slapd-%i.pid
+ PrivateTmp=on
+ 
+-- 
+2.31.1
+
diff --git a/SOURCES/0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch b/SOURCES/0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch
deleted file mode 100644
index 9e5231d..0000000
--- a/SOURCES/0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From 8d06fdf44b0d337f1e321e61ee1b22972ddea917 Mon Sep 17 00:00:00 2001
-From: tbordaz <tbordaz@redhat.com>
-Date: Fri, 2 Apr 2021 14:05:41 +0200
-Subject: [PATCH 3/3] Issue 4700 - Regression in winsync replication agreement
- (#4712)
-
-Bug description:
-	#4396 fixes a memory leak but did not set 'cn=config' as
-	DSE backend.
-	It had no signicant impact unless with sidgen IPA plugin
-
-Fix description:
-	revert the portion of the #4364 patch that set be_suffix
-	in be_addsuffix, free the suffix before setting it
-
-relates: https://github.com/389ds/389-ds-base/issues/4700
-
-Reviewed by: Pierre Rogier (thanks !)
-
-Platforms tested: F33
----
- ldap/servers/slapd/backend.c | 3 ++-
- ldap/servers/slapd/fedse.c   | 6 +++++-
- 2 files changed, 7 insertions(+), 2 deletions(-)
-
-diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
-index 5707504a9..5db706841 100644
---- a/ldap/servers/slapd/backend.c
-+++ b/ldap/servers/slapd/backend.c
-@@ -173,7 +173,8 @@ void
- be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix)
- {
-     if (be->be_state != BE_STATE_DELETED) {
--        be->be_suffix = slapi_sdn_dup(suffix);;
-+        slapi_sdn_free(&be->be_suffix);
-+        be->be_suffix = slapi_sdn_dup(suffix);
-     }
- }
- 
-diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
-index 7b820b540..44159c991 100644
---- a/ldap/servers/slapd/fedse.c
-+++ b/ldap/servers/slapd/fedse.c
-@@ -2846,6 +2846,7 @@ setup_internal_backends(char *configdir)
-         Slapi_DN counters;
-         Slapi_DN snmp;
-         Slapi_DN root;
-+        Slapi_Backend *be;
-         Slapi_DN encryption;
-         Slapi_DN saslmapping;
-         Slapi_DN plugins;
-@@ -2894,7 +2895,10 @@ setup_internal_backends(char *configdir)
-         dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
-         dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
- 
--        be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
-+        be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
-+        be_addsuffix(be, &root);
-+        be_addsuffix(be, &monitor);
-+        be_addsuffix(be, &config);
- 
-         /*
-          * Now that the be's are in place, we can setup the mapping tree.
--- 
-2.31.1
-
diff --git a/SOURCES/0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch b/SOURCES/0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch
new file mode 100644
index 0000000..566d0ea
--- /dev/null
+++ b/SOURCES/0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch
@@ -0,0 +1,35 @@
+From 0ed471bae52bb0debd23336cbc5f3f1d400cbbc9 Mon Sep 17 00:00:00 2001
+From: Adam Williamson <awilliam@redhat.com>
+Date: Thu, 27 Jan 2022 11:07:26 -0800
+Subject: [PATCH] Issue 5127 - ds_selinux_restorecon.sh: always exit 0
+
+Description:
+
+We don't want to error out and give up on starting the service
+if the restorecon fails - it might just be that the directory
+doesn't exist and doesn't need restoring. Issue identified and
+fix suggested by Simon Farnsworth
+
+relates: https://github.com/389ds/389-ds-base/issues/5127
+
+Reviewed by: adamw & mreynolds
+---
+ wrappers/ds_selinux_restorecon.sh.in | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
+index 063347de3..2d7386233 100644
+--- a/wrappers/ds_selinux_restorecon.sh.in
++++ b/wrappers/ds_selinux_restorecon.sh.in
+@@ -29,5 +29,6 @@ then
+     exit 0
+ fi
+ 
+-# Now run restorecon
+-restorecon ${DS_HOME_DIR}
++# Now run restorecon, but don't die if it fails (could be that the
++# directory doesn't exist)
++restorecon ${DS_HOME_DIR} || :
+-- 
+2.31.1
+
diff --git a/SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch b/SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch
deleted file mode 100644
index 2371384..0000000
--- a/SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch
+++ /dev/null
@@ -1,88 +0,0 @@
-From 7345c51c68dfd90a704ccbb0e5b1e736af80f146 Mon Sep 17 00:00:00 2001
-From: Thierry Bordaz <tbordaz@redhat.com>
-Date: Mon, 17 May 2021 16:10:22 +0200
-Subject: [PATCH] Issue 4725 - Fix compiler warnings
-
----
- ldap/servers/slapd/proto-slap.h | 2 +-
- ldap/servers/slapd/pw.c         | 9 ++++-----
- ldap/servers/slapd/pw_retry.c   | 2 --
- 3 files changed, 5 insertions(+), 8 deletions(-)
-
-diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
-index 6ff178127..2768d5a1d 100644
---- a/ldap/servers/slapd/proto-slap.h
-+++ b/ldap/servers/slapd/proto-slap.h
-@@ -1012,7 +1012,7 @@ int add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e);
-  * pw_retry.c
-  */
- int update_pw_retry(Slapi_PBlock *pb);
--int update_trp_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
-+int update_tpr_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
- void pw_apply_mods(const Slapi_DN *sdn, Slapi_Mods *mods);
- void pw_set_componentID(struct slapi_componentid *cid);
- struct slapi_componentid *pw_get_componentID(void);
-diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
-index d98422513..2a167c8f1 100644
---- a/ldap/servers/slapd/pw.c
-+++ b/ldap/servers/slapd/pw.c
-@@ -2622,7 +2622,6 @@ int
- slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int send_result) {
-     passwdPolicy *pwpolicy = NULL;
-     char *dn = NULL;
--    int tpr_maxuse;
-     char *value;
-     time_t cur_time;
-     char *cur_time_str = NULL;
-@@ -2638,7 +2637,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
-         return 0;
-     }
- 
--    if (slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE") == NULL) {
-+    if (!slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE")) {
-         /* the password was not reset by an admin while a TRP pwp was set, just returned */
-         return 0;
-     }
-@@ -2646,7 +2645,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
-     /* Check entry TPR max use */
-     if (pwpolicy->pw_tpr_maxuse >= 0) {
-         uint use_count;
--        value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
-+        value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
-         if (value) {
-             /* max Use is enforced */
-             use_count = strtoull(value, 0, 0);
-@@ -2681,7 +2680,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
- 
-     /* Check entry TPR expiration at a specific time */
-     if (pwpolicy->pw_tpr_delay_expire_at >= 0) {
--        value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
-+        value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
-         if (value) {
-             /* max Use is enforced */
-             if (difftime(parse_genTime(cur_time_str), parse_genTime(value)) >= 0) {
-@@ -2709,7 +2708,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
- 
-     /* Check entry TPR valid after a specific time */
-     if (pwpolicy->pw_tpr_delay_valid_from >= 0) {
--        value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
-+        value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
-         if (value) {
-             /* validity after a specific time is enforced */
-             if (difftime(parse_genTime(value), parse_genTime(cur_time_str)) >= 0) {
-diff --git a/ldap/servers/slapd/pw_retry.c b/ldap/servers/slapd/pw_retry.c
-index 5d13eb636..af54aa19d 100644
---- a/ldap/servers/slapd/pw_retry.c
-+++ b/ldap/servers/slapd/pw_retry.c
-@@ -163,8 +163,6 @@ set_retry_cnt_and_time(Slapi_PBlock *pb, int count, time_t cur_time)
- int
- set_tpr_usecount_mods(Slapi_PBlock *pb, Slapi_Mods *smods, int count)
- {
--    char *timestr;
--    time_t unlock_time;
-     char retry_cnt[16] = {0}; /* 1-65535 */
-     const char *dn = NULL;
-     Slapi_DN *sdn = NULL;
--- 
-2.31.1
-
diff --git a/SOURCES/0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch b/SOURCES/0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch
new file mode 100644
index 0000000..05d7f36
--- /dev/null
+++ b/SOURCES/0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch
@@ -0,0 +1,262 @@
+From 93588ea455aff691bdfbf59cdef4df8fcedb69f2 Mon Sep 17 00:00:00 2001
+From: Firstyear <william@blackhats.net.au>
+Date: Thu, 19 Aug 2021 10:46:00 +1000
+Subject: [PATCH 1/2] Issue 4775 - Add entryuuid CLI and Fixup (#4776)
+
+Bug Description: EntryUUID when added was missing its CLI
+and helpers for fixups.
+
+Fix Description: Add the CLI elements.
+
+fixes: https://github.com/389ds/389-ds-base/issues/4775
+
+Author: William Brown <william@blackhats.net.au>
+
+Review by: @mreynolds389 (thanks!)
+---
+ src/lib389/lib389/cli_conf/plugin.py          |  6 ++-
+ .../lib389/cli_conf/plugins/entryuuid.py      | 39 ++++++++++++++
+ src/plugins/entryuuid/src/lib.rs              | 54 ++++++++-----------
+ 3 files changed, 65 insertions(+), 34 deletions(-)
+ create mode 100644 src/lib389/lib389/cli_conf/plugins/entryuuid.py
+
+diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
+index 560c57f9b..7c0cf2c80 100644
+--- a/src/lib389/lib389/cli_conf/plugin.py
++++ b/src/lib389/lib389/cli_conf/plugin.py
+@@ -1,5 +1,5 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2018 Red Hat, Inc.
++# Copyright (C) 2022 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -27,6 +27,8 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
+ from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
+ from lib389.cli_conf.plugins import automember as cli_automember
+ from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
++from lib389.cli_conf.plugins import contentsync as cli_contentsync
++from lib389.cli_conf.plugins import entryuuid as cli_entryuuid
+ 
+ SINGULAR = Plugin
+ MANY = Plugins
+@@ -113,6 +115,8 @@ def create_parser(subparsers):
+     cli_passthroughauth.create_parser(subcommands)
+     cli_retrochangelog.create_parser(subcommands)
+     cli_posix_winsync.create_parser(subcommands)
++    cli_contentsync.create_parser(subcommands)
++    cli_entryuuid.create_parser(subcommands)
+ 
+     list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
+     list_parser.set_defaults(func=plugin_list)
+diff --git a/src/lib389/lib389/cli_conf/plugins/entryuuid.py b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
+new file mode 100644
+index 000000000..6c86bff4b
+--- /dev/null
++++ b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
+@@ -0,0 +1,39 @@
++# --- BEGIN COPYRIGHT BLOCK ---
++# Copyright (C) 2021 William Brown <william@blackhats.net.au>
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
++# --- END COPYRIGHT BLOCK ---
++
++import ldap
++from lib389.plugins import EntryUUIDPlugin
++from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add
++
++def do_fixup(inst, basedn, log, args):
++    plugin = EntryUUIDPlugin(inst)
++    log.info('Attempting to add task entry...')
++    if not plugin.status():
++        log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn)
++        return
++    fixup_task = plugin.fixup(args.DN, args.filter)
++    fixup_task.wait()
++    exitcode = fixup_task.get_exit_code()
++    if exitcode != 0:
++        log.error('EntryUUID fixup task has failed. Please, check the error log for more - %s' % exitcode)
++    else:
++        log.info('Successfully added task entry')
++
++def create_parser(subparsers):
++    referint = subparsers.add_parser('entryuuid', help='Manage and configure EntryUUID plugin')
++    subcommands = referint.add_subparsers(help='action')
++
++    add_generic_plugin_parsers(subcommands, EntryUUIDPlugin)
++
++    fixup = subcommands.add_parser('fixup', help='Run the fix-up task for EntryUUID plugin')
++    fixup.set_defaults(func=do_fixup)
++    fixup.add_argument('DN', help="Base DN that contains entries to fix up")
++    fixup.add_argument('-f', '--filter',
++                       help='Filter for entries to fix up.\n If omitted, all entries under base DN'
++                            'will have their EntryUUID attribute regenerated if not present.')
++
+diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
+index da9f0c239..29a9f1258 100644
+--- a/src/plugins/entryuuid/src/lib.rs
++++ b/src/plugins/entryuuid/src/lib.rs
+@@ -33,7 +33,7 @@ fn assign_uuid(e: &mut EntryRef) {
+     // 🚧 safety barrier 🚧
+     if e.contains_attr("entryUUID") {
+         log_error!(
+-            ErrorLevel::Trace,
++            ErrorLevel::Plugin,
+             "assign_uuid -> entryUUID exists, skipping dn {}",
+             sdn.to_dn_string()
+         );
+@@ -47,7 +47,7 @@ fn assign_uuid(e: &mut EntryRef) {
+     if sdn.is_below_suffix(&*config_sdn) || sdn.is_below_suffix(&*schema_sdn) {
+         // We don't need to assign to these suffixes.
+         log_error!(
+-            ErrorLevel::Trace,
++            ErrorLevel::Plugin,
+             "assign_uuid -> not assigning to {:?} as part of system suffix",
+             sdn.to_dn_string()
+         );
+@@ -57,7 +57,7 @@ fn assign_uuid(e: &mut EntryRef) {
+     // Generate a new Uuid.
+     let u: Uuid = Uuid::new_v4();
+     log_error!(
+-        ErrorLevel::Trace,
++        ErrorLevel::Plugin,
+         "assign_uuid -> assigning {:?} to dn {}",
+         u,
+         sdn.to_dn_string()
+@@ -78,13 +78,13 @@ impl SlapiPlugin3 for EntryUuid {
+     fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
+         if pb.get_is_replicated_operation() {
+             log_error!(
+-                ErrorLevel::Trace,
++                ErrorLevel::Plugin,
+                 "betxn_pre_add -> replicated operation, will not change"
+             );
+             return Ok(());
+         }
+ 
+-        log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
++        log_error!(ErrorLevel::Plugin, "betxn_pre_add -> start");
+ 
+         let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
+         assign_uuid(&mut e);
+@@ -105,7 +105,7 @@ impl SlapiPlugin3 for EntryUuid {
+                 .first()
+                 .ok_or_else(|| {
+                     log_error!(
+-                        ErrorLevel::Trace,
++                        ErrorLevel::Plugin,
+                         "task_validate basedn error -> empty value array?"
+                     );
+                     LDAPError::Operation
+@@ -113,7 +113,7 @@ impl SlapiPlugin3 for EntryUuid {
+                 .as_ref()
+                 .try_into()
+                 .map_err(|e| {
+-                    log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e);
++                    log_error!(ErrorLevel::Plugin, "task_validate basedn error -> {:?}", e);
+                     LDAPError::Operation
+                 })?,
+             None => return Err(LDAPError::ObjectClassViolation),
+@@ -124,7 +124,7 @@ impl SlapiPlugin3 for EntryUuid {
+                 .first()
+                 .ok_or_else(|| {
+                     log_error!(
+-                        ErrorLevel::Trace,
++                        ErrorLevel::Plugin,
+                         "task_validate filter error -> empty value array?"
+                     );
+                     LDAPError::Operation
+@@ -132,7 +132,7 @@ impl SlapiPlugin3 for EntryUuid {
+                 .as_ref()
+                 .try_into()
+                 .map_err(|e| {
+-                    log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e);
++                    log_error!(ErrorLevel::Plugin, "task_validate filter error -> {:?}", e);
+                     LDAPError::Operation
+                 })?,
+             None => {
+@@ -144,17 +144,11 @@ impl SlapiPlugin3 for EntryUuid {
+         // Error if the first filter is empty?
+ 
+         // Now, to make things faster, we wrap the filter in a exclude term.
+-
+-        // 2021 - #4877 because we allow entryuuid to be strings, on import these may
+-        // be invalid. As a result, we DO need to allow the fixup to check the entryuuid
+-        // value is correct, so we can not exclude these during the search.
+-        /*
+         let raw_filter = if !raw_filter.starts_with('(') && !raw_filter.ends_with('(') {
+             format!("(&({})(!(entryuuid=*)))", raw_filter)
+         } else {
+             format!("(&{}(!(entryuuid=*)))", raw_filter)
+         };
+-        */
+ 
+         Ok(FixupData { basedn, raw_filter })
+     }
+@@ -165,7 +159,7 @@ impl SlapiPlugin3 for EntryUuid {
+ 
+     fn task_handler(_task: &Task, data: Self::TaskData) -> Result<Self::TaskData, PluginError> {
+         log_error!(
+-            ErrorLevel::Trace,
++            ErrorLevel::Plugin,
+             "task_handler -> start thread with -> {:?}",
+             data
+         );
+@@ -205,12 +199,12 @@ impl SlapiPlugin3 for EntryUuid {
+     }
+ 
+     fn start(_pb: &mut PblockRef) -> Result<(), PluginError> {
+-        log_error!(ErrorLevel::Trace, "plugin start");
++        log_error!(ErrorLevel::Plugin, "plugin start");
+         Ok(())
+     }
+ 
+     fn close(_pb: &mut PblockRef) -> Result<(), PluginError> {
+-        log_error!(ErrorLevel::Trace, "plugin close");
++        log_error!(ErrorLevel::Plugin, "plugin close");
+         Ok(())
+     }
+ }
+@@ -219,20 +213,14 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
+     /* Supply a modification to the entry. */
+     let sdn = e.get_sdnref();
+ 
+-    /* Check that entryuuid doesn't already exist, and is valid */
+-    if let Some(valueset) = e.get_attr("entryUUID") {
+-        if valueset.iter().all(|v| {
+-            let u: Result<Uuid, _> = (&v).try_into();
+-            u.is_ok()
+-        }) {
+-            // All values were valid uuid, move on!
+-            log_error!(
+-                ErrorLevel::Plugin,
+-                "skipping fixup for -> {}",
+-                sdn.to_dn_string()
+-            );
+-            return Ok(());
+-        }
++    /* Sanity check that entryuuid doesn't already exist */
++    if e.contains_attr("entryUUID") {
++        log_error!(
++            ErrorLevel::Plugin,
++            "skipping fixup for -> {}",
++            sdn.to_dn_string()
++        );
++        return Ok(());
+     }
+ 
+     // Setup the modifications
+@@ -248,7 +236,7 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
+ 
+     match lmod.execute() {
+         Ok(_) => {
+-            log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
++            log_error!(ErrorLevel::Plugin, "fixed-up -> {}", sdn.to_dn_string());
+             Ok(())
+         }
+         Err(e) => {
+-- 
+2.34.1
+
diff --git a/SOURCES/0017-Issue-4775-Fix-cherry-pick-error.patch b/SOURCES/0017-Issue-4775-Fix-cherry-pick-error.patch
new file mode 100644
index 0000000..f9e5b2c
--- /dev/null
+++ b/SOURCES/0017-Issue-4775-Fix-cherry-pick-error.patch
@@ -0,0 +1,42 @@
+From 525f2307fa3e2d0ae55c8c922e6f7220a1e5bd1b Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Thu, 3 Feb 2022 16:51:38 -0500
+Subject: [PATCH] Issue 4775 - Fix cherry-pick error
+
+Bug Description: EntryUUID when added was missing its CLI
+and helpers for fixups.
+
+Fix Description: Add the CLI elements.
+
+fixes: https://github.com/389ds/389-ds-base/issues/4775
+
+Author: William Brown <william@blackhats.net.au>
+
+Review by: @mreynolds389 (thanks!)
+---
+ src/lib389/lib389/cli_conf/plugin.py | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
+index 7c0cf2c80..fb0ef3077 100644
+--- a/src/lib389/lib389/cli_conf/plugin.py
++++ b/src/lib389/lib389/cli_conf/plugin.py
+@@ -27,7 +27,6 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
+ from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
+ from lib389.cli_conf.plugins import automember as cli_automember
+ from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
+-from lib389.cli_conf.plugins import contentsync as cli_contentsync
+ from lib389.cli_conf.plugins import entryuuid as cli_entryuuid
+ 
+ SINGULAR = Plugin
+@@ -115,7 +114,6 @@ def create_parser(subparsers):
+     cli_passthroughauth.create_parser(subcommands)
+     cli_retrochangelog.create_parser(subcommands)
+     cli_posix_winsync.create_parser(subcommands)
+-    cli_contentsync.create_parser(subcommands)
+     cli_entryuuid.create_parser(subcommands)
+ 
+     list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
+-- 
+2.34.1
+
diff --git a/SOURCES/0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch b/SOURCES/0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch
deleted file mode 100644
index 6785c04..0000000
--- a/SOURCES/0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch
+++ /dev/null
@@ -1,202 +0,0 @@
-From 59266365eda8130abf6901263efae4c87586376a Mon Sep 17 00:00:00 2001
-From: Thierry Bordaz <tbordaz@redhat.com>
-Date: Mon, 28 Jun 2021 16:40:15 +0200
-Subject: [PATCH] Issue 4814 - _cl5_get_tod_expiration may crash at startup
-
-Bug description:
-	This bug exist only in 1.4.3 branch
-	In 1.4.3, CL open as a separated database so
-        compaction mechanism is started along a CL
-        mechanism (CL trimming).
-        The problem is that the configuration of the CL
-        compaction is done after the compaction mechanism
-        (is started). Depending on thread scheduling it
-        crashes
-
-Fix description:
-        Make sure configuration of compaction thread is
-        taken into account (cl5ConfigSetCompaction) before
-	the compaction thread starts (cl5open)
-
-relates: https://github.com/389ds/389-ds-base/issues/4814
-
-Reviewed by: Mark Reynolds, Simon Pichugin (thanks !)
-
-Platforms tested: 8.5
----
- ldap/servers/plugins/replication/cl5_api.c    | 24 ++++++++++++-------
- ldap/servers/plugins/replication/cl5_api.h    | 10 +++++++-
- ldap/servers/plugins/replication/cl5_config.c |  8 +++++--
- ldap/servers/plugins/replication/cl5_init.c   |  4 +++-
- ldap/servers/plugins/replication/cl5_test.c   |  2 +-
- .../servers/plugins/replication/repl_shared.h |  2 +-
- 6 files changed, 35 insertions(+), 15 deletions(-)
-
-diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
-index 4c5077b48..954b6b9e3 100644
---- a/ldap/servers/plugins/replication/cl5_api.c
-+++ b/ldap/servers/plugins/replication/cl5_api.c
-@@ -1016,6 +1016,20 @@ cl5GetState()
-     return s_cl5Desc.dbState;
- }
- 
-+void
-+cl5ConfigSetCompaction(int compactInterval, char *compactTime)
-+{
-+
-+    if (compactInterval != CL5_NUM_IGNORE) {
-+        s_cl5Desc.dbTrim.compactInterval = compactInterval;
-+    }
-+
-+    if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
-+        s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
-+    }
-+
-+}
-+
- /* Name:        cl5ConfigTrimming
-    Description:    sets changelog trimming parameters; changelog must be open.
-    Parameters:  maxEntries - maximum number of entries in the chnagelog (in all files);
-@@ -1026,7 +1040,7 @@ cl5GetState()
-                 CL5_BAD_STATE if changelog is not open
-  */
- int
--cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval)
-+cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval)
- {
-     if (s_cl5Desc.dbState == CL5_STATE_NONE) {
-         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
-@@ -1058,14 +1072,6 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char
-         s_cl5Desc.dbTrim.maxEntries = maxEntries;
-     }
- 
--    if (compactInterval != CL5_NUM_IGNORE) {
--        s_cl5Desc.dbTrim.compactInterval = compactInterval;
--    }
--
--    if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
--        s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
--    }
--
-     if (trimInterval != CL5_NUM_IGNORE) {
-         s_cl5Desc.dbTrim.trimInterval = trimInterval;
-     }
-diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
-index 11db771f2..6aa48aec4 100644
---- a/ldap/servers/plugins/replication/cl5_api.h
-+++ b/ldap/servers/plugins/replication/cl5_api.h
-@@ -227,6 +227,14 @@ int cl5ImportLDIF(const char *clDir, const char *ldifFile, Replica **replicas);
- 
- int cl5GetState(void);
- 
-+/* Name:        cl5ConfigSetCompaction
-+ * Description: sets the database compaction parameters
-+ * Parameters:  compactInterval - Interval for compaction default is 30days
-+ *              compactTime - Compact time default is 23:59
-+ * Return:      void
-+ */
-+void cl5ConfigSetCompaction(int compactInterval, char *compactTime);
-+
- /* Name:        cl5ConfigTrimming
-    Description:    sets changelog trimming parameters
-    Parameters:  maxEntries - maximum number of entries in the log;
-@@ -236,7 +244,7 @@ int cl5GetState(void);
-    Return:        CL5_SUCCESS if successful;
-                 CL5_BAD_STATE if changelog has not been open
-  */
--int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval);
-+int cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval);
- 
- void cl5DestroyIterator(void *iterator);
- 
-diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c
-index b32686788..a43534c9b 100644
---- a/ldap/servers/plugins/replication/cl5_config.c
-+++ b/ldap/servers/plugins/replication/cl5_config.c
-@@ -197,6 +197,8 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
- 
-         goto done;
-     }
-+    /* Set compaction parameters */
-+    cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
- 
-     /* start the changelog */
-     rc = cl5Open(config.dir, &config.dbconfig);
-@@ -212,7 +214,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
-     }
- 
-     /* set trimming parameters */
--    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
-+    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
-     if (rc != CL5_SUCCESS) {
-         *returncode = 1;
-         if (returntext) {
-@@ -548,6 +550,8 @@ changelog5_config_modify(Slapi_PBlock *pb,
-                 slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
-                               "changelog5_config_modify - Deleted the changelog at %s\n", currentDir);
-             }
-+            /* Set compaction parameters */
-+            cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
- 
-             rc = cl5Open(config.dir, &config.dbconfig);
-             if (rc != CL5_SUCCESS) {
-@@ -575,7 +579,7 @@ changelog5_config_modify(Slapi_PBlock *pb,
-     if (config.maxEntries != CL5_NUM_IGNORE ||
-         config.trimInterval != CL5_NUM_IGNORE ||
-         strcmp(config.maxAge, CL5_STR_IGNORE) != 0) {
--        rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
-+        rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
-         if (rc != CL5_SUCCESS) {
-             *returncode = 1;
-             if (returntext) {
-diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c
-index 251859714..567e0274c 100644
---- a/ldap/servers/plugins/replication/cl5_init.c
-+++ b/ldap/servers/plugins/replication/cl5_init.c
-@@ -45,6 +45,8 @@ changelog5_init()
-         rc = 0; /* OK */
-         goto done;
-     }
-+    /* Set compaction parameters */
-+    cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
- 
-     /* start changelog */
-     rc = cl5Open(config.dir, &config.dbconfig);
-@@ -57,7 +59,7 @@ changelog5_init()
-     }
- 
-     /* set trimming parameters */
--    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
-+    rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
-     if (rc != CL5_SUCCESS) {
-         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
-                       "changelog5_init: failed to configure changelog trimming\n");
-diff --git a/ldap/servers/plugins/replication/cl5_test.c b/ldap/servers/plugins/replication/cl5_test.c
-index d6656653c..efb8c543a 100644
---- a/ldap/servers/plugins/replication/cl5_test.c
-+++ b/ldap/servers/plugins/replication/cl5_test.c
-@@ -281,7 +281,7 @@ testTrimming()
-         rc = populateChangelog(300, NULL);
- 
-         if (rc == 0)
--            rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_COMPACT_INTERVAL, CHANGELOGDB_TRIM_INTERVAL);
-+            rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_TRIM_INTERVAL);
- 
-         interval = PR_SecondsToInterval(300); /* 5 min is default trimming interval */
-         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
-diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h
-index 6708e12f7..b59b2bd27 100644
---- a/ldap/servers/plugins/replication/repl_shared.h
-+++ b/ldap/servers/plugins/replication/repl_shared.h
-@@ -26,7 +26,7 @@
- 
- #define CHANGELOGDB_TRIM_INTERVAL 300        /* 5 minutes */
- #define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */
--#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */
-+#define CHANGELOGDB_COMPACT_TIME "23:59"     /* around midnight */
- 
- #define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir"
- #define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries"
--- 
-2.31.1
-
diff --git a/SOURCES/0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch b/SOURCES/0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch
deleted file mode 100644
index 5ab86af..0000000
--- a/SOURCES/0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From e7fdfe527a5f72674fe4b577a0555cabf8ec73a5 Mon Sep 17 00:00:00 2001
-From: tbordaz <tbordaz@redhat.com>
-Date: Mon, 7 Jun 2021 11:23:35 +0200
-Subject: [PATCH] Issue 4789 - Temporary password rules are not enforce with
- local password policy (#4790)
-
-Bug description:
-	When allocating a password policy structure (new_passwdPolicy)
-        it is initialized with the local policy definition or
-	the global one. If it exists a local policy entry, the TPR
-        attributes (passwordTPRMaxUse, passwordTPRDelayValidFrom and
-        passwordTPRDelayExpireAt) are not taken into account.
-
-Fix description:
-	Take into account TPR attributes to initialize the policy
-
-relates: https://github.com/389ds/389-ds-base/issues/4789
-
-Reviewed by: Simon Pichugin, William Brown
-
-Platforms tested: F34
----
- ldap/servers/slapd/pw.c | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
-diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
-index 2a167c8f1..7680df41d 100644
---- a/ldap/servers/slapd/pw.c
-+++ b/ldap/servers/slapd/pw.c
-@@ -2356,6 +2356,18 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
-                     if ((sval = attr_get_present_values(attr))) {
-                         pwdpolicy->pw_dict_path = (char *)slapi_value_get_string(*sval);
-                     }
-+                } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_MAXUSE)) {
-+                    if ((sval = attr_get_present_values(attr))) {
-+                        pwdpolicy->pw_tpr_maxuse = slapi_value_get_int(*sval);
-+                    }
-+                } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_EXPIRE_AT)) {
-+                    if ((sval = attr_get_present_values(attr))) {
-+                        pwdpolicy->pw_tpr_delay_expire_at = slapi_value_get_int(*sval);
-+                    }
-+                } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_VALID_FROM)) {
-+                    if ((sval = attr_get_present_values(attr))) {
-+                        pwdpolicy->pw_tpr_delay_valid_from = slapi_value_get_int(*sval);
-+                    }
-                 }
-             } /* end of for() loop */
-             if (pw_entry) {
--- 
-2.31.1
-
diff --git a/SOURCES/0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch b/SOURCES/0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch
deleted file mode 100644
index f9e4266..0000000
--- a/SOURCES/0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch
+++ /dev/null
@@ -1,350 +0,0 @@
-From 6a741b3ef50babf2ac2479437a38829204ffd438 Mon Sep 17 00:00:00 2001
-From: tbordaz <tbordaz@redhat.com>
-Date: Thu, 17 Jun 2021 16:22:09 +0200
-Subject: [PATCH] Issue 4788 - CLI should support Temporary Password Rules
- attributes (#4793)
-
-Bug description:
-    Since #4725, password policy support temporary password rules.
-    CLI (dsconf) does not support this RFE and only direct ldap
-    operation can configure global/local password policy
-
-Fix description:
-    Update dsconf to support this new RFE.
-    To run successfully the testcase it relies on #4788
-
-relates: #4788
-
-Reviewed by: Simon Pichugin (thanks !!)
-
-Platforms tested: F34
----
- .../password/pwdPolicy_attribute_test.py      | 172 ++++++++++++++++--
- src/lib389/lib389/cli_conf/pwpolicy.py        |   5 +-
- src/lib389/lib389/pwpolicy.py                 |   5 +-
- 3 files changed, 165 insertions(+), 17 deletions(-)
-
-diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
-index aee3a91ad..085d0a373 100644
---- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
-+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
-@@ -34,7 +34,7 @@ log = logging.getLogger(__name__)
- 
- 
- @pytest.fixture(scope="module")
--def create_user(topology_st, request):
-+def test_user(topology_st, request):
-     """User for binding operation"""
-     topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on')
-     log.info('Adding test user {}')
-@@ -56,10 +56,11 @@ def create_user(topology_st, request):
-         topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- 
-     request.addfinalizer(fin)
-+    return user
- 
- 
- @pytest.fixture(scope="module")
--def password_policy(topology_st, create_user):
-+def password_policy(topology_st, test_user):
-     """Set up password policy for subtree and user"""
- 
-     pwp = PwPolicyManager(topology_st.standalone)
-@@ -71,7 +72,7 @@ def password_policy(topology_st, create_user):
-     pwp.create_user_policy(TEST_USER_DN, policy_props)
- 
- @pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented")
--def test_pwd_reset(topology_st, create_user):
-+def test_pwd_reset(topology_st, test_user):
-     """Test new password policy attribute "pwdReset"
- 
-     :id: 03db357b-4800-411e-a36e-28a534293004
-@@ -124,7 +125,7 @@ def test_pwd_reset(topology_st, create_user):
-                          [('on', 'off', ldap.UNWILLING_TO_PERFORM),
-                           ('off', 'off', ldap.UNWILLING_TO_PERFORM),
-                           ('off', 'on', False), ('on', 'on', False)])
--def test_change_pwd(topology_st, create_user, password_policy,
-+def test_change_pwd(topology_st, test_user, password_policy,
-                     subtree_pwchange, user_pwchange, exception):
-     """Verify that 'passwordChange' attr works as expected
-     User should have a priority over a subtree.
-@@ -184,7 +185,7 @@ def test_change_pwd(topology_st, create_user, password_policy,
-         user.reset_password(TEST_USER_PWD)
- 
- 
--def test_pwd_min_age(topology_st, create_user, password_policy):
-+def test_pwd_min_age(topology_st, test_user, password_policy):
-     """If we set passwordMinAge to some value, for example to 10, then it
-     should not allow the user to change the password within 10 seconds after
-     his previous change.
-@@ -257,7 +258,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy):
-         topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-         user.reset_password(TEST_USER_PWD)
- 
--def test_global_tpr_maxuse_1(topology_st, create_user, request):
-+def test_global_tpr_maxuse_1(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRMaxUse
-     Test that after passwordTPRMaxUse failures to bind
-     additional bind with valid password are failing with CONSTRAINT_VIOLATION
-@@ -374,7 +375,7 @@ def test_global_tpr_maxuse_1(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_maxuse_2(topology_st, create_user, request):
-+def test_global_tpr_maxuse_2(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRMaxUse
-     Test that after less than passwordTPRMaxUse failures to bind
-     additional bind with valid password are successfull
-@@ -474,7 +475,7 @@ def test_global_tpr_maxuse_2(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_maxuse_3(topology_st, create_user, request):
-+def test_global_tpr_maxuse_3(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRMaxUse
-     Test that after less than passwordTPRMaxUse failures to bind
-     A bind with valid password is successfull but passwordMustChange
-@@ -587,7 +588,7 @@ def test_global_tpr_maxuse_3(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_maxuse_4(topology_st, create_user, request):
-+def test_global_tpr_maxuse_4(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRMaxUse
-     Test that a TPR attribute passwordTPRMaxUse
-     can be updated by DM but not the by user itself
-@@ -701,7 +702,148 @@ def test_global_tpr_maxuse_4(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
-+def test_local_tpr_maxuse_5(topology_st, test_user, request):
-+    """Test TPR local policy overpass global one: passwordTPRMaxUse
-+    Test that after passwordTPRMaxUse failures to bind
-+    additional bind with valid password are failing with CONSTRAINT_VIOLATION
-+
-+    :id: c3919707-d804-445a-8754-8385b1072c42
-+    :customerscenario: False
-+    :setup: Standalone instance
-+    :steps:
-+        1. Global password policy Enable passwordMustChange
-+        2. Global password policy Set passwordTPRMaxUse=5
-+        3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test
-+        4. Local password policy Enable passwordMustChange
-+        5. Local password policy Set passwordTPRMaxUse=10 (higher than global)
-+        6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS
-+        7. Check that passwordTPRUseCount got to the limit (5)
-+        8. Bind with a wrong password (CONSTRAINT_VIOLATION)
-+           and check passwordTPRUseCount overpass the limit by 1 (11)
-+        9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION
-+           and check passwordTPRUseCount increases
-+        10. Reset password policy configuration and remove local password from user
-+    :expected results:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+        5. Success
-+        6. Success
-+        7. Success
-+        8. Success
-+        9. Success
-+        10. Success
-+    """
-+
-+    global_tpr_maxuse = 5
-+    # Set global password policy config, passwordMaxFailure being higher than
-+    # passwordTPRMaxUse so that TPR is enforced first
-+    topology_st.standalone.config.replace('passwordMustChange', 'on')
-+    topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20))
-+    topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse))
-+    time.sleep(.5)
-+
-+    local_tpr_maxuse = global_tpr_maxuse + 5
-+    # Reset user's password with a local password policy
-+    # that has passwordTPRMaxUse higher than global
-+    #our_user = UserAccount(topology_st.standalone, TEST_USER_DN)
-+    subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
-+                     'slapd-standalone1',
-+                     'localpwp',
-+                     'adduser',
-+                     test_user.dn])
-+    subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
-+                     'slapd-standalone1',
-+                     'localpwp',
-+                     'set',
-+                     '--pwptprmaxuse',
-+                     str(local_tpr_maxuse),
-+                     '--pwdmustchange',
-+                     'on',
-+                     test_user.dn])
-+    test_user.replace('userpassword', PASSWORD)
-+    time.sleep(.5)
-+
-+    # look up to passwordTPRMaxUse with failing
-+    # bind to check that the limits of TPR are enforced
-+    for i in range(local_tpr_maxuse):
-+        # Bind as user with a wrong password
-+        with pytest.raises(ldap.INVALID_CREDENTIALS):
-+            test_user.rebind('wrong password')
-+        time.sleep(.5)
-+
-+        # Check that pwdReset is TRUE
-+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-+        #assert test_user.get_attr_val_utf8('pwdReset') == 'TRUE'
-+
-+        # Check that pwdTPRReset is TRUE
-+        assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
-+        assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1)
-+        log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1))
-+
-+
-+    # Now the #failures reached passwordTPRMaxUse
-+    # Check that pwdReset is TRUE
-+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-+
-+    # Check that pwdTPRReset is TRUE
-+    assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
-+    assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse)
-+    log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse))
-+
-+    # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION
-+    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
-+        test_user.rebind("wrong password")
-+    time.sleep(.5)
-+
-+    # Check that pwdReset is TRUE
-+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-+
-+    # Check that pwdTPRReset is TRUE
-+    assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
-+    assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1)
-+    log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i))
-+
-+    # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION
-+    # and passwordTPRRetryCount remains unchanged
-+    # account is now similar to locked
-+    for i in range(10):
-+        # Bind as user with valid password
-+        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
-+            test_user.rebind(PASSWORD)
-+        time.sleep(.5)
-+
-+        # Check that pwdReset is TRUE
-+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-+
-+        # Check that pwdTPRReset is TRUE
-+        # pwdTPRUseCount keeps increasing
-+        assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
-+        assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2)
-+        log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i + 2))
-+
-+
-+    def fin():
-+        topology_st.standalone.restart()
-+        # Reset password policy config
-+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-+        topology_st.standalone.config.replace('passwordMustChange', 'off')
-+
-+        # Remove local password policy from that entry
-+        subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
-+                        'slapd-standalone1',
-+                        'localpwp',
-+                        'remove',
-+                        test_user.dn])
-+
-+        # Reset user's password
-+        test_user.replace('userpassword', TEST_USER_PWD)
-+
-+
-+    request.addfinalizer(fin)
-+
-+def test_global_tpr_delayValidFrom_1(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRDelayValidFrom
-     Test that a TPR password is not valid before reset time +
-     passwordTPRDelayValidFrom
-@@ -766,7 +908,7 @@ def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
-+def test_global_tpr_delayValidFrom_2(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRDelayValidFrom
-     Test that a TPR password is valid after reset time +
-     passwordTPRDelayValidFrom
-@@ -838,7 +980,7 @@ def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
-+def test_global_tpr_delayValidFrom_3(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRDelayValidFrom
-     Test that a TPR attribute passwordTPRDelayValidFrom
-     can be updated by DM but not the by user itself
-@@ -940,7 +1082,7 @@ def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
-+def test_global_tpr_delayExpireAt_1(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRDelayExpireAt
-     Test that a TPR password is not valid after reset time +
-     passwordTPRDelayExpireAt
-@@ -1010,7 +1152,7 @@ def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
-+def test_global_tpr_delayExpireAt_2(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRDelayExpireAt
-     Test that a TPR password is valid before reset time +
-     passwordTPRDelayExpireAt
-@@ -1082,7 +1224,7 @@ def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
- 
-     request.addfinalizer(fin)
- 
--def test_global_tpr_delayExpireAt_3(topology_st, create_user, request):
-+def test_global_tpr_delayExpireAt_3(topology_st, test_user, request):
-     """Test global TPR policy : passwordTPRDelayExpireAt
-     Test that a TPR attribute passwordTPRDelayExpireAt
-     can be updated by DM but not the by user itself
-diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py
-index 2838afcb8..26af6e7ec 100644
---- a/src/lib389/lib389/cli_conf/pwpolicy.py
-+++ b/src/lib389/lib389/cli_conf/pwpolicy.py
-@@ -255,6 +255,9 @@ def create_parser(subparsers):
-     set_parser.add_argument('--pwpinheritglobal', help="Set to \"on\" to allow local policies to inherit the global policy")
-     set_parser.add_argument('--pwddictcheck', help="Set to \"on\" to enforce CrackLib dictionary checking")
-     set_parser.add_argument('--pwddictpath', help="Filesystem path to specific/custom CrackLib dictionary files")
-+    set_parser.add_argument('--pwptprmaxuse', help="Number of times a reset password can be used for authentication")
-+    set_parser.add_argument('--pwptprdelayexpireat', help="Number of seconds after which a reset password expires")
-+    set_parser.add_argument('--pwptprdelayvalidfrom', help="Number of seconds to wait before using a reset password to authenticated")
-     # delete local password policy
-     del_parser = local_subcommands.add_parser('remove', help='Remove a local password policy')
-     del_parser.set_defaults(func=del_local_policy)
-@@ -291,4 +294,4 @@ def create_parser(subparsers):
-     #############################################
-     set_parser.add_argument('DN', nargs=1, help='Set the local policy for this entry DN')
-     add_subtree_parser.add_argument('DN', nargs=1, help='Add/replace the subtree policy for this entry DN')
--    add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
-\ No newline at end of file
-+    add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
-diff --git a/src/lib389/lib389/pwpolicy.py b/src/lib389/lib389/pwpolicy.py
-index 8653cb195..d2427933b 100644
---- a/src/lib389/lib389/pwpolicy.py
-+++ b/src/lib389/lib389/pwpolicy.py
-@@ -65,7 +65,10 @@ class PwPolicyManager(object):
-             'pwddictcheck': 'passworddictcheck',
-             'pwddictpath': 'passworddictpath',
-             'pwdallowhash': 'nsslapd-allow-hashed-passwords',
--            'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global'
-+            'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global',
-+            'pwptprmaxuse': 'passwordTPRMaxUse',
-+            'pwptprdelayexpireat': 'passwordTPRDelayExpireAt',
-+            'pwptprdelayvalidfrom': 'passwordTPRDelayValidFrom'
-         }
- 
-     def is_subtree_policy(self, dn):
--- 
-2.31.1
-
diff --git a/SOURCES/0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch b/SOURCES/0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch
deleted file mode 100644
index 193d44b..0000000
--- a/SOURCES/0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch
+++ /dev/null
@@ -1,179 +0,0 @@
-From 7b7217538908ae58df864ef5cd82e1d3303c189f Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Mon, 7 Jun 2021 12:58:42 -0400
-Subject: [PATCH] Issue 4447 - Crash when the Referential Integrity log is
- manually edited
-
-Bug Description:  If the referint log is manually edited with a string
-                  that is not a DN the server will crash when processing
-                  the log.
-
-Fix Description:  Check for NULL pointers when strtoking the file line.
-
-relates: https://github.com/389ds/389-ds-base/issues/4447
-
-Reviewed by: firstyear(Thanks!)
----
- .../tests/suites/plugins/referint_test.py     | 72 +++++++++++++++----
- ldap/servers/plugins/referint/referint.c      |  7 ++
- src/lib389/lib389/plugins.py                  | 15 ++++
- 3 files changed, 80 insertions(+), 14 deletions(-)
-
-diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py
-index 02b985767..fda602545 100644
---- a/dirsrvtests/tests/suites/plugins/referint_test.py
-+++ b/dirsrvtests/tests/suites/plugins/referint_test.py
-@@ -1,5 +1,5 @@
- # --- BEGIN COPYRIGHT BLOCK ---
--# Copyright (C) 2016 Red Hat, Inc.
-+# Copyright (C) 2021 Red Hat, Inc.
- # All rights reserved.
- #
- # License: GPL (version 3 or any later version).
-@@ -12,13 +12,11 @@ Created on Dec 12, 2019
- @author: tbordaz
- '''
- import logging
--import subprocess
- import pytest
- from lib389 import Entry
--from lib389.utils import *
--from lib389.plugins import *
--from lib389._constants import *
--from lib389.idm.user import UserAccounts, UserAccount
-+from lib389.plugins import ReferentialIntegrityPlugin
-+from lib389._constants import DEFAULT_SUFFIX
-+from lib389.idm.user import UserAccounts
- from lib389.idm.group import Groups
- from lib389.topologies import topology_st as topo
- 
-@@ -29,21 +27,27 @@ log = logging.getLogger(__name__)
- ESCAPED_RDN_BASE = "foo\\,oo"
- def _user_get_dn(no):
-     uid = '%s%d' % (ESCAPED_RDN_BASE, no)
--    dn = 'uid=%s,%s' % (uid, SUFFIX)
-+    dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX)
-     return (uid, dn)
- 
- def add_escaped_user(server, no):
-     (uid, dn) = _user_get_dn(no)
-     log.fatal('Adding user (%s): ' % dn)
--    server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
--                             'uid': [uid],
--                             'sn' : [uid],
--                             'cn' : [uid]})))
-+    users = UserAccounts(server, DEFAULT_SUFFIX, None)
-+    user_properties = {
-+        'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'],
-+        'uid': uid,
-+        'cn' : uid,
-+        'sn' : uid,
-+        'uidNumber' : '1000',
-+        'gidNumber' : '2000',
-+        'homeDirectory' : '/home/testuser',
-+    }
-+    users.create(properties=user_properties)
-     return dn
- 
--@pytest.mark.ds50020
- def test_referential_false_failure(topo):
--    """On MODRDN referential integrity can erronously fail
-+    """On MODRDN referential integrity can erroneously fail
- 
-     :id: f77aeb80-c4c4-471b-8c1b-4733b714778b
-     :setup: Standalone Instance
-@@ -100,6 +104,46 @@ def test_referential_false_failure(topo):
-     inst.restart()
- 
-     # Here if the bug is fixed, referential is able to update the member value
--    inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0)
-+    user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False)
- 
- 
-+def test_invalid_referint_log(topo):
-+    """If there is an invalid log line in the referint log, make sure the server
-+    does not crash at startup
-+
-+    :id: 34807b5a-ab17-4281-ae48-4e3513e19145
-+    :setup: Standalone Instance
-+    :steps:
-+        1. Set the referint log delay
-+        2. Create invalid log
-+        3. Start the server (no crash)
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+        3. Success
-+    """
-+
-+    inst = topo.standalone
-+
-+    # Set delay - required for log parsing at server startup
-+    plugin = ReferentialIntegrityPlugin(inst)
-+    plugin.enable()
-+    plugin.set_update_delay('2')
-+    logfile = plugin.get_log_file()
-+    inst.restart()
-+
-+    # Create invalid log
-+    inst.stop()
-+    with open(logfile, 'w') as log_fh:
-+        log_fh.write("CRASH\n")
-+
-+    # Start the instance
-+    inst.start()
-+    assert inst.status()
-+
-+
-+if __name__ == '__main__':
-+    # Run isolated
-+    # -s for DEBUG mode
-+    CURRENT_FILE = os.path.realpath(__file__)
-+    pytest.main("-s %s" % CURRENT_FILE)
-diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
-index fd5356d72..28240c1f6 100644
---- a/ldap/servers/plugins/referint/referint.c
-+++ b/ldap/servers/plugins/referint/referint.c
-@@ -1447,6 +1447,13 @@ referint_thread_func(void *arg __attribute__((unused)))
-             sdn = slapi_sdn_new_normdn_byref(ptoken);
-             ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
- 
-+            if (ptoken == NULL) {
-+                /* Invalid line in referint log, skip it */
-+                slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM,
-+                        "Skipping invalid referint log line: (%s)\n", thisline);
-+                slapi_sdn_free(&sdn);
-+                continue;
-+            }
-             if (!strcasecmp(ptoken, "NULL")) {
-                 tmprdn = NULL;
-             } else {
-diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
-index 2d88e60bd..b07e80022 100644
---- a/src/lib389/lib389/plugins.py
-+++ b/src/lib389/lib389/plugins.py
-@@ -518,6 +518,21 @@ class ReferentialIntegrityPlugin(Plugin):
- 
-         self.set('referint-update-delay', str(value))
- 
-+    def get_log_file(self):
-+        """Get referint log file"""
-+
-+        return self.get_attr_val_utf8('referint-logfile')
-+
-+    def get_log_file_formatted(self):
-+        """Get referint log file"""
-+
-+        return self.display_attr('referint-logfile')
-+
-+    def set_log_file(self, value):
-+        """Set referint log file"""
-+
-+        self.set('referint-logfile', value)
-+
-     def get_membership_attr(self, formatted=False):
-         """Get referint-membership-attr attribute"""
- 
--- 
-2.31.1
-
diff --git a/SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch b/SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
deleted file mode 100644
index 4810288..0000000
--- a/SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
+++ /dev/null
@@ -1,114 +0,0 @@
-From 964a153b420b26140e0bbddfbebb4a51aaa0e4ea Mon Sep 17 00:00:00 2001
-From: James Chapman <jachapma@redhat.com>
-Date: Thu, 3 Jun 2021 15:16:22 +0000
-Subject: [PATCH 1/7] Issue 4791 - Missing dependency for RetroCL RFE
-
-Description: The RetroCL exclude attribute RFE is dependent on functionality of the
-	     EntryUUID bug fix, that didn't make into the latest build. This breaks the
-             RetroCL exclude attr feature so we need to provide a workaround.
-
-Fixes: https://github.com/389ds/389-ds-base/issues/4791
-
-Relates: https://github.com/389ds/389-ds-base/pull/4723
-
-Relates: https://github.com/389ds/389-ds-base/issues/4224
-
-Reviewed by: tbordaz, droideck (Thank you)
----
- .../tests/suites/retrocl/basic_test.py        |  6 ++--
- .../lib389/cli_conf/plugins/retrochangelog.py | 35 +++++++++++++++++--
- 2 files changed, 36 insertions(+), 5 deletions(-)
-
-diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
-index 112c73cb9..f3bc50f29 100644
---- a/dirsrvtests/tests/suites/retrocl/basic_test.py
-+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
-@@ -17,7 +17,7 @@ from lib389.utils import *
- from lib389.tasks import *
- from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
- from lib389.cli_base.dsrc import dsrc_arg_concat
--from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
-+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
- from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
- 
- pytestmark = pytest.mark.tier1
-@@ -122,7 +122,7 @@ def test_retrocl_exclude_attr_add(topology_st):
-     args.bindpw = None
-     args.prompt = False
-     args.exclude_attrs = ATTR_HOMEPHONE
--    args.func = retrochangelog_add
-+    args.func = retrochangelog_add_attr
-     dsrc_inst = dsrc_arg_concat(args, None)
-     inst = connect_instance(dsrc_inst, False, args)
-     result = args.func(inst, None, log, args)
-@@ -255,7 +255,7 @@ def test_retrocl_exclude_attr_mod(topology_st):
-     args.bindpw = None
-     args.prompt = False
-     args.exclude_attrs = ATTR_CARLICENSE
--    args.func = retrochangelog_add
-+    args.func = retrochangelog_add_attr
-     dsrc_inst = dsrc_arg_concat(args, None)
-     inst = connect_instance(dsrc_inst, False, args)
-     result = args.func(inst, None, log, args)
-diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
-index 9940c6532..160fbb82d 100644
---- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
-+++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
-@@ -6,8 +6,13 @@
- # See LICENSE for details.
- # --- END COPYRIGHT BLOCK ---
- 
-+# JC Work around for missing dependency on https://github.com/389ds/389-ds-base/pull/4344
-+import ldap
-+
- from lib389.plugins import RetroChangelogPlugin
--from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
-+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
-+# from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr
-+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, _args_to_attrs
- 
- arg_to_attr = {
-     'is_replicated': 'isReplicated',
-@@ -18,12 +23,38 @@ arg_to_attr = {
-     'exclude_attrs': 'nsslapd-exclude-attrs'
- }
- 
--
- def retrochangelog_edit(inst, basedn, log, args):
-     log = log.getChild('retrochangelog_edit')
-     plugin = RetroChangelogPlugin(inst)
-     generic_object_edit(plugin, log, args, arg_to_attr)
- 
-+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
-+def retrochangelog_add_attr(inst, basedn, log, args):
-+    log = log.getChild('retrochangelog_add_attr')
-+    plugin = RetroChangelogPlugin(inst)
-+    generic_object_add_attr(plugin, log, args, arg_to_attr)
-+
-+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
-+def generic_object_add_attr(dsldap_object, log, args, arg_to_attr):
-+    """Add an attribute to the entry. This differs to 'edit' as edit uses replace,
-+    and this allows multivalues to be added.
-+
-+    dsldap_object should be a single instance of DSLdapObject with a set dn
-+    """
-+    log = log.getChild('generic_object_add_attr')
-+    # Gather the attributes
-+    attrs = _args_to_attrs(args, arg_to_attr)
-+
-+    modlist = []
-+    for attr, value in attrs.items():
-+        if not isinstance(value, list):
-+            value = [value]
-+        modlist.append((ldap.MOD_ADD, attr, value))
-+    if len(modlist) > 0:
-+        dsldap_object.apply_mods(modlist)
-+        log.info("Successfully changed the %s", dsldap_object.dn)
-+    else:
-+        raise ValueError("There is nothing to set in the %s plugin entry" % dsldap_object.dn)
- 
- def _add_parser_args(parser):
-     parser.add_argument('--is-replicated', choices=['TRUE', 'FALSE'], type=str.upper,
--- 
-2.31.1
-
diff --git a/SOURCES/0022-Issue-4656-remove-problematic-language-from-ds-replc.patch b/SOURCES/0022-Issue-4656-remove-problematic-language-from-ds-replc.patch
deleted file mode 100644
index 82d6945..0000000
--- a/SOURCES/0022-Issue-4656-remove-problematic-language-from-ds-replc.patch
+++ /dev/null
@@ -1,642 +0,0 @@
-From d2ac7e98d53cfe6c74c99ddf3504b1072418f05a Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Thu, 11 Mar 2021 10:12:46 -0500
-Subject: [PATCH] Issue 4656 - remove problematic language from ds-replcheck
-
-Description: remove master from ds-replcheck and replace it with supplier
-
-relates: https://github.com/389ds/389-ds-base/issues/4656
-
-Reviewed by: mreynolds
-
-e with '#' will be ignored, and an empty message aborts the commit.
----
- ldap/admin/src/scripts/ds-replcheck | 202 ++++++++++++++--------------
- 1 file changed, 101 insertions(+), 101 deletions(-)
-
-diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
-index 169496e8f..f411f357a 100755
---- a/ldap/admin/src/scripts/ds-replcheck
-+++ b/ldap/admin/src/scripts/ds-replcheck
-@@ -1,7 +1,7 @@
- #!/usr/bin/python3
- 
- # --- BEGIN COPYRIGHT BLOCK ---
--# Copyright (C) 2020 Red Hat, Inc.
-+# Copyright (C) 2021 Red Hat, Inc.
- # All rights reserved.
- #
- # License: GPL (version 3 or any later version).
-@@ -63,7 +63,7 @@ def remove_entry(rentries, dn):
- def get_ruv_time(ruv, rid):
-     """Take a RUV element (nsds50ruv attribute) and extract the timestamp from maxcsn
-     :param ruv - A lsit of RUV elements
--    :param rid - The rid of the master to extractthe maxcsn time from
-+    :param rid - The rid of the supplier to extract the maxcsn time from
-     :return: The time in seconds of the maxcsn, or 0 if there is no maxcsn, or -1 if
-              the rid was not found
-     """
-@@ -213,22 +213,22 @@ def get_ruv_state(opts):
-     :param opts - all the script options
-     :return - A text description of the replicaton state
-     """
--    mtime = get_ruv_time(opts['master_ruv'], opts['rid'])
-+    mtime = get_ruv_time(opts['supplier_ruv'], opts['rid'])
-     rtime = get_ruv_time(opts['replica_ruv'], opts['rid'])
-     if mtime == -1:
--        repl_state = "Replication State: Replica ID ({}) not found in Master's RUV".format(opts['rid'])
-+        repl_state = "Replication State: Replica ID ({}) not found in Supplier's RUV".format(opts['rid'])
-     elif rtime == -1:
-         repl_state = "Replication State: Replica ID ({}) not found in Replica's RUV (not initialized?)".format(opts['rid'])
-     elif mtime == 0:
--        repl_state = "Replication State: Master has not seen any updates"
-+        repl_state = "Replication State: Supplier has not seen any updates"
-     elif rtime == 0:
--        repl_state = "Replication State: Replica has not seen any changes from the Master"
-+        repl_state = "Replication State: Replica has not seen any changes from the Supplier"
-     elif mtime > rtime:
--        repl_state = "Replication State: Replica is behind Master by: {} seconds".format(mtime - rtime)
-+        repl_state = "Replication State: Replica is behind Supplier by: {} seconds".format(mtime - rtime)
-     elif mtime < rtime:
--        repl_state = "Replication State: Replica is ahead of Master by: {} seconds".format(rtime - mtime)
-+        repl_state = "Replication State: Replica is ahead of Supplier by: {} seconds".format(rtime - mtime)
-     else:
--        repl_state = "Replication State: Master and Replica are in perfect synchronization"
-+        repl_state = "Replication State: Supplier and Replica are in perfect synchronization"
- 
-     return repl_state
- 
-@@ -238,11 +238,11 @@ def get_ruv_report(opts):
-     :param opts - all the script options
-     :return - A text blob to display in the report
-     """
--    opts['master_ruv'].sort()
-+    opts['supplier_ruv'].sort()
-     opts['replica_ruv'].sort()
- 
--    report = "Master RUV:\n"
--    for element in opts['master_ruv']:
-+    report = "Supplier RUV:\n"
-+    for element in opts['supplier_ruv']:
-         report += "  %s\n" % (element)
-     report += "\nReplica RUV:\n"
-     for element in opts['replica_ruv']:
-@@ -521,7 +521,7 @@ def get_ldif_ruv(LDIF, opts):
- 
- def cmp_entry(mentry, rentry, opts):
-     """Compare the two entries, and return a "diff map"
--    :param mentry - A Master entry
-+    :param mentry - A Supplier entry
-     :param rentry - A Replica entry
-     :param opts - A Dict of the scripts options
-     :return - A Dict of the differences in the entry, or None
-@@ -536,7 +536,7 @@ def cmp_entry(mentry, rentry, opts):
-     mlist = list(mentry.data.keys())
- 
-     #
--    # Check master
-+    # Check Supplier
-     #
-     for mattr in mlist:
-         if mattr in opts['ignore']:
-@@ -555,7 +555,7 @@ def cmp_entry(mentry, rentry, opts):
-                             if not found:
-                                 diff['missing'].append("")
-                             found = True
--                            diff['missing'].append(" - Master's State Info: %s" % (val))
-+                            diff['missing'].append(" - Supplier's State Info: %s" % (val))
-                             diff['missing'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
-                 else:
-                     # No state info, just move on
-@@ -566,18 +566,18 @@ def cmp_entry(mentry, rentry, opts):
-             if report_conflict(rentry, mattr, opts) and report_conflict(mentry, mattr, opts):
-                 diff['diff'].append(" - Attribute '%s' is different:" % mattr)
-                 if 'nscpentrywsi' in mentry.data:
--                    # Process Master
-+                    # Process Supplier
-                     found = False
-                     for val in mentry.data['nscpentrywsi']:
-                         if val.lower().startswith(mattr + ';'):
-                             if not found:
--                                diff['diff'].append("      Master:")
-+                                diff['diff'].append("      Supplier:")
-                             diff['diff'].append("        - Value:      %s" % (val.split(':')[1].lstrip()))
-                             diff['diff'].append("        - State Info: %s" % (val))
-                             diff['diff'].append("        - Date:       %s\n" % (time.ctime(extract_time(val))))
-                             found = True
-                     if not found:
--                        diff['diff'].append("      Master: ")
-+                        diff['diff'].append("      Supplier: ")
-                         for val in mentry.data[mattr]:
-                             # This is an "origin" value which means it's never been
-                             # updated since replication was set up.  So its the
-@@ -605,7 +605,7 @@ def cmp_entry(mentry, rentry, opts):
-                         diff['diff'].append("")
-                 else:
-                     # no state info, report what we got
--                    diff['diff'].append("      Master: ")
-+                    diff['diff'].append("      Supplier: ")
-                     for val in mentry.data[mattr]:
-                         diff['diff'].append("        - %s: %s" % (mattr, val))
-                     diff['diff'].append("      Replica: ")
-@@ -622,9 +622,9 @@ def cmp_entry(mentry, rentry, opts):
-             continue
- 
-         if rattr not in mlist:
--            # Master is missing the attribute
-+            # Supplier is missing the attribute
-             if report_conflict(rentry, rattr, opts):
--                diff['missing'].append(" - Master missing attribute: \"%s\"" % (rattr))
-+                diff['missing'].append(" - Supplier missing attribute: \"%s\"" % (rattr))
-                 diff_count += 1
-                 if 'nscpentrywsi' in rentry.data:
-                     found = False
-@@ -663,7 +663,7 @@ def do_offline_report(opts, output_file=None):
-     try:
-         MLDIF = open(opts['mldif'], "r")
-     except Exception as e:
--        print('Failed to open Master LDIF: ' + str(e))
-+        print('Failed to open Supplier LDIF: ' + str(e))
-         return
- 
-     try:
-@@ -676,10 +676,10 @@ def do_offline_report(opts, output_file=None):
-     # Verify LDIF Files
-     try:
-         if opts['verbose']:
--            print("Validating Master ldif file ({})...".format(opts['mldif']))
-+            print("Validating Supplier ldif file ({})...".format(opts['mldif']))
-         LDIFRecordList(MLDIF).parse()
-     except ValueError:
--        print('Master LDIF file in invalid, aborting...')
-+        print('Supplier LDIF file in invalid, aborting...')
-         MLDIF.close()
-         RLDIF.close()
-         return
-@@ -696,34 +696,34 @@ def do_offline_report(opts, output_file=None):
-     # Get all the dn's, and entry counts
-     if opts['verbose']:
-         print ("Gathering all the DN's...")
--    master_dns = get_dns(MLDIF, opts['mldif'], opts)
-+    supplier_dns = get_dns(MLDIF, opts['mldif'], opts)
-     replica_dns = get_dns(RLDIF, opts['rldif'], opts)
--    if master_dns is None or replica_dns is None:
-+    if supplier_dns is None or replica_dns is None:
-         print("Aborting scan...")
-         MLDIF.close()
-         RLDIF.close()
-         sys.exit(1)
--    m_count = len(master_dns)
-+    m_count = len(supplier_dns)
-     r_count = len(replica_dns)
- 
-     # Get DB RUV
-     if opts['verbose']:
-         print ("Gathering the database RUV's...")
--    opts['master_ruv'] = get_ldif_ruv(MLDIF, opts)
-+    opts['supplier_ruv'] = get_ldif_ruv(MLDIF, opts)
-     opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts)
- 
--    """ Compare the master entries with the replica's.  Take our list of dn's from
--    the master ldif and get that entry( dn) from the master and replica ldif.  In
-+    """ Compare the Supplier entries with the replica's.  Take our list of dn's from
-+    the Supplier ldif and get that entry( dn) from the Supplier and replica ldif.  In
-     this phase we keep keep track of conflict/tombstone counts, and we check for
-     missing entries and entry differences.   We only need to do the entry diff
-     checking in this phase - we do not need to do it when process the replica dn's
-     because if the entry exists in both LDIF's then we already checked or diffs
--    while processing the master dn's.
-+    while processing the Supplier dn's.
-     """
-     if opts['verbose']:
--        print ("Comparing Master to Replica...")
-+        print ("Comparing Supplier to Replica...")
-     missing = False
--    for dn in master_dns:
-+    for dn in supplier_dns:
-         mresult = ldif_search(MLDIF, dn)
-         if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
-             # Try from the beginning
-@@ -736,7 +736,7 @@ def do_offline_report(opts, output_file=None):
-                 rresult['conflict'] is not None or rresult['tombstone']):
-                 """ We can safely remove this DN from the replica dn list as it
-                 does not need to be checked again.  This also speeds things up
--                when doing the replica vs master phase.
-+                when doing the replica vs Supplier phase.
-                 """
-                 replica_dns.remove(dn)
- 
-@@ -766,7 +766,7 @@ def do_offline_report(opts, output_file=None):
-                         missing_report += ('  Entries missing on Replica:\n')
-                         missing = True
-                     if mresult['entry'] and 'createtimestamp' in mresult['entry'].data:
--                        missing_report += ('   - %s  (Created on Master at: %s)\n' %
-+                        missing_report += ('   - %s  (Created on Supplier at: %s)\n' %
-                                            (dn, convert_timestamp(mresult['entry'].data['createtimestamp'][0])))
-                     else:
-                         missing_report += ('  - %s\n' % dn)
-@@ -791,7 +791,7 @@ def do_offline_report(opts, output_file=None):
-     remaining conflict & tombstone entries as well.
-     """
-     if opts['verbose']:
--        print ("Comparing Replica to Master...")
-+        print ("Comparing Replica to Supplier...")
-     MLDIF.seek(0)
-     RLDIF.seek(0)
-     missing = False
-@@ -811,7 +811,7 @@ def do_offline_report(opts, output_file=None):
-             if mresult['entry'] is None and mresult['glue'] is None:
-                 MLDIF.seek(rresult['idx'])  # Set the LDIF cursor/index to the last good line
-                 if not missing:
--                    missing_report += ('  Entries missing on Master:\n')
-+                    missing_report += ('  Entries missing on Supplier:\n')
-                     missing = True
-                 if rresult['entry'] and 'createtimestamp' in rresult['entry'].data:
-                     missing_report += ('   - %s  (Created on Replica at: %s)\n' %
-@@ -837,12 +837,12 @@ def do_offline_report(opts, output_file=None):
-     final_report += get_ruv_report(opts)
-     final_report += ('Entry Counts\n')
-     final_report += ('=====================================================\n\n')
--    final_report += ('Master:  %d\n' % (m_count))
-+    final_report += ('Supplier:  %d\n' % (m_count))
-     final_report += ('Replica: %d\n\n' % (r_count))
- 
-     final_report += ('\nTombstones\n')
-     final_report += ('=====================================================\n\n')
--    final_report += ('Master:  %d\n' % (mtombstones))
-+    final_report += ('Supplier:  %d\n' % (mtombstones))
-     final_report += ('Replica: %d\n' % (rtombstones))
- 
-     final_report += get_conflict_report(mconflicts, rconflicts, opts['conflicts'])
-@@ -859,9 +859,9 @@ def do_offline_report(opts, output_file=None):
-     final_report += ('\nResult\n')
-     final_report += ('=====================================================\n\n')
-     if missing_report == "" and len(diff_report) == 0:
--        final_report += ('No replication differences between Master and Replica\n')
-+        final_report += ('No replication differences between Supplier and Replica\n')
-     else:
--        final_report += ('There are replication differences between Master and Replica\n')
-+        final_report += ('There are replication differences between Supplier and Replica\n')
- 
-     if output_file:
-         output_file.write(final_report)
-@@ -871,8 +871,8 @@ def do_offline_report(opts, output_file=None):
- 
- def check_for_diffs(mentries, mglue, rentries, rglue, report, opts):
-     """Online mode only - Check for diffs, return the updated report
--    :param mentries - Master entries
--    :param mglue - Master glue entries
-+    :param mentries - Supplier entries
-+    :param mglue - Supplier glue entries
-     :param rentries - Replica entries
-     :param rglue - Replica glue entries
-     :param report - A Dict of the entire report
-@@ -947,8 +947,8 @@ def validate_suffix(ldapnode, suffix, hostname):
-     # Check suffix is replicated
-     try:
-         replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix
--        master_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
--        if (len(master_replica) != 1):
-+        supplier_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
-+        if (len(supplier_replica) != 1):
-             print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix))
-             return False
-     except ldap.LDAPError as e:
-@@ -969,7 +969,7 @@ def connect_to_replicas(opts):
-         muri = "%s://%s" % (opts['mprotocol'], opts['mhost'].replace("/", "%2f"))
-     else:
-         muri = "%s://%s:%s/" % (opts['mprotocol'], opts['mhost'], opts['mport'])
--    master = SimpleLDAPObject(muri)
-+    supplier = SimpleLDAPObject(muri)
- 
-     if opts['rprotocol'].lower() == 'ldapi':
-         ruri = "%s://%s" % (opts['rprotocol'], opts['rhost'].replace("/", "%2f"))
-@@ -978,23 +978,23 @@ def connect_to_replicas(opts):
-     replica = SimpleLDAPObject(ruri)
- 
-     # Set timeouts
--    master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
--    master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
-+    supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
-+    supplier.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
-     replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
-     replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
- 
-     # Setup Secure Connection
-     if opts['certdir'] is not None:
--        # Setup Master
-+        # Setup Supplier
-         if opts['mprotocol'] != LDAPI:
--            master.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
--            master.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
-+            supplier.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
-+            supplier.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
-             if opts['mprotocol'] == LDAP:
-                 # Do StartTLS
-                 try:
--                    master.start_tls_s()
-+                    supplier.start_tls_s()
-                 except ldap.LDAPError as e:
--                    print('TLS negotiation failed on Master: {}'.format(str(e)))
-+                    print('TLS negotiation failed on Supplier: {}'.format(str(e)))
-                     exit(1)
- 
-         # Setup Replica
-@@ -1006,17 +1006,17 @@ def connect_to_replicas(opts):
-                 try:
-                     replica.start_tls_s()
-                 except ldap.LDAPError as e:
--                    print('TLS negotiation failed on Master: {}'.format(str(e)))
-+                    print('TLS negotiation failed on Supplier: {}'.format(str(e)))
-                     exit(1)
- 
--    # Open connection to master
-+    # Open connection to Supplier
-     try:
--        master.simple_bind_s(opts['binddn'], opts['bindpw'])
-+        supplier.simple_bind_s(opts['binddn'], opts['bindpw'])
-     except ldap.SERVER_DOWN as e:
-         print(f"Cannot connect to {muri} ({str(e)})")
-         sys.exit(1)
-     except ldap.LDAPError as e:
--        print("Error: Failed to authenticate to Master: ({}).  "
-+        print("Error: Failed to authenticate to Supplier: ({}).  "
-               "Please check your credentials and LDAP urls are correct.".format(str(e)))
-         sys.exit(1)
- 
-@@ -1034,7 +1034,7 @@ def connect_to_replicas(opts):
-     # Validate suffix
-     if opts['verbose']:
-         print ("Validating suffix ...")
--    if not validate_suffix(master, opts['suffix'], opts['mhost']):
-+    if not validate_suffix(supplier, opts['suffix'], opts['mhost']):
-       sys.exit(1)
- 
-     if not validate_suffix(replica,opts['suffix'], opts['rhost']):
-@@ -1042,16 +1042,16 @@ def connect_to_replicas(opts):
- 
-     # Get the RUVs
-     if opts['verbose']:
--        print ("Gathering Master's RUV...")
-+        print ("Gathering Supplier's RUV...")
-     try:
--        master_ruv = master.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
--        if len(master_ruv) > 0:
--            opts['master_ruv'] = ensure_list_str(master_ruv[0][1]['nsds50ruv'])
-+        supplier_ruv = supplier.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
-+        if len(supplier_ruv) > 0:
-+            opts['supplier_ruv'] = ensure_list_str(supplier_ruv[0][1]['nsds50ruv'])
-         else:
--            print("Error: Master does not have an RUV entry")
-+            print("Error: Supplier does not have an RUV entry")
-             sys.exit(1)
-     except ldap.LDAPError as e:
--        print("Error: Failed to get Master RUV entry: {}".format(str(e)))
-+        print("Error: Failed to get Supplier RUV entry: {}".format(str(e)))
-         sys.exit(1)
- 
-     if opts['verbose']:
-@@ -1067,12 +1067,12 @@ def connect_to_replicas(opts):
-         print("Error: Failed to get Replica RUV entry: {}".format(str(e)))
-         sys.exit(1)
- 
--    # Get the master RID
-+    # Get the Supplier RID
-     if opts['verbose']:
--        print("Getting Master's replica ID")
-+        print("Getting Supplier's replica ID")
-     try:
-         search_filter = "(&(objectclass=nsds5Replica)(nsDS5ReplicaRoot={})(nsDS5ReplicaId=*))".format(opts['suffix'])
--        replica_entry = master.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
-+        replica_entry = supplier.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
-         if len(replica_entry) > 0:
-             opts['rid'] = ensure_int(replica_entry[0][1]['nsDS5ReplicaId'][0])
-         else:
-@@ -1081,7 +1081,7 @@ def connect_to_replicas(opts):
-         print("Error: Failed to get Replica entry: {}".format(str(e)))
-         sys.exit(1)
- 
--    return (master, replica, opts)
-+    return (supplier, replica, opts)
- 
- 
- def print_online_report(report, opts, output_file):
-@@ -1104,11 +1104,11 @@ def print_online_report(report, opts, output_file):
-     final_report += get_ruv_report(opts)
-     final_report += ('Entry Counts\n')
-     final_report += ('=====================================================\n\n')
--    final_report += ('Master:  %d\n' % (report['m_count']))
-+    final_report += ('Supplier:  %d\n' % (report['m_count']))
-     final_report += ('Replica: %d\n\n' % (report['r_count']))
-     final_report += ('\nTombstones\n')
-     final_report += ('=====================================================\n\n')
--    final_report += ('Master:  %d\n' % (report['mtombstones']))
-+    final_report += ('Supplier:  %d\n' % (report['mtombstones']))
-     final_report += ('Replica: %d\n' % (report['rtombstones']))
-     final_report += report['conflict']
-     missing = False
-@@ -1121,7 +1121,7 @@ def print_online_report(report, opts, output_file):
-             final_report += ('  Entries missing on Replica:\n')
-             for entry in report['r_missing']:
-                 if 'createtimestamp' in entry.data:
--                    final_report += ('   - %s  (Created on Master at: %s)\n' %
-+                    final_report += ('   - %s  (Created on Supplier at: %s)\n' %
-                                      (entry.dn, convert_timestamp(entry.data['createtimestamp'][0])))
-                 else:
-                     final_report += ('   - %s\n' % (entry.dn))
-@@ -1129,7 +1129,7 @@ def print_online_report(report, opts, output_file):
-         if m_missing > 0:
-             if r_missing > 0:
-                 final_report += ('\n')
--            final_report += ('  Entries missing on Master:\n')
-+            final_report += ('  Entries missing on Supplier:\n')
-             for entry in report['m_missing']:
-                 if 'createtimestamp' in entry.data:
-                     final_report += ('   - %s  (Created on Replica at: %s)\n' %
-@@ -1146,9 +1146,9 @@ def print_online_report(report, opts, output_file):
-     final_report += ('\nResult\n')
-     final_report += ('=====================================================\n\n')
-     if not missing and len(report['diff']) == 0:
--        final_report += ('No replication differences between Master and Replica\n')
-+        final_report += ('No replication differences between Supplier and Replica\n')
-     else:
--        final_report += ('There are replication differences between Master and Replica\n')
-+        final_report += ('There are replication differences between Supplier and Replica\n')
- 
-     if output_file:
-         output_file.write(final_report)
-@@ -1170,7 +1170,7 @@ def remove_state_info(entry):
- 
- def get_conflict_report(mentries, rentries, verbose):
-     """Gather the conflict entry dn's for each replica
--    :param mentries - Master entries
-+    :param mentries - Supplier entries
-     :param rentries - Replica entries
-     :param verbose - verbose logging
-     :return - A text blob to dispaly in the report
-@@ -1197,7 +1197,7 @@ def get_conflict_report(mentries, rentries, verbose):
-         report = "\n\nConflict Entries\n"
-         report += "=====================================================\n\n"
-         if len(m_conflicts) > 0:
--            report += ('Master Conflict Entries:  %d\n' % (len(m_conflicts)))
-+            report += ('Supplier Conflict Entries:  %d\n' % (len(m_conflicts)))
-             if verbose:
-                 for entry in m_conflicts:
-                     report += ('\n - %s\n' % (entry['dn']))
-@@ -1239,8 +1239,8 @@ def do_online_report(opts, output_file=None):
-     rconflicts = []
-     mconflicts = []
- 
--    # Fire off paged searches on Master and Replica
--    master, replica, opts = connect_to_replicas(opts)
-+    # Fire off paged searches on Supplier and Replica
-+    supplier, replica, opts = connect_to_replicas(opts)
- 
-     if opts['verbose']:
-         print('Start searching and comparing...')
-@@ -1248,12 +1248,12 @@ def do_online_report(opts, output_file=None):
-     controls = [paged_ctrl]
-     req_pr_ctrl = controls[0]
-     try:
--        master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
--                                         "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
--                                         ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
--                                         serverctrls=controls)
-+        supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
-+                                             "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
-+                                             ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
-+                                             serverctrls=controls)
-     except ldap.LDAPError as e:
--        print("Error: Failed to get Master entries: %s", str(e))
-+        print("Error: Failed to get Supplier entries: %s", str(e))
-         sys.exit(1)
-     try:
-         replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
-@@ -1268,11 +1268,11 @@ def do_online_report(opts, output_file=None):
-     while not m_done or not r_done:
-         try:
-             if not m_done:
--                m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
-+                m_rtype, m_rdata, m_rmsgid, m_rctrls = supplier.result3(supplier_msgid)
-             elif not r_done:
-                 m_rdata = []
-         except ldap.LDAPError as e:
--            print("Error: Problem getting the results from the master: %s", str(e))
-+            print("Error: Problem getting the results from the Supplier: %s", str(e))
-             sys.exit(1)
-         try:
-             if not r_done:
-@@ -1299,7 +1299,7 @@ def do_online_report(opts, output_file=None):
-                                  report, opts)
- 
-         if not m_done:
--            # Master
-+            # Supplier
-             m_pctrls = [
-                 c
-                 for c in m_rctrls
-@@ -1310,11 +1310,11 @@ def do_online_report(opts, output_file=None):
-                     try:
-                         # Copy cookie from response control to request control
-                         req_pr_ctrl.cookie = m_pctrls[0].cookie
--                        master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
-+                        supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
-                             "(|(objectclass=*)(objectclass=ldapsubentry))",
-                             ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
-                     except ldap.LDAPError as e:
--                        print("Error: Problem searching the master: %s", str(e))
-+                        print("Error: Problem searching the Supplier: %s", str(e))
-                         sys.exit(1)
-                 else:
-                     m_done = True  # No more pages available
-@@ -1354,7 +1354,7 @@ def do_online_report(opts, output_file=None):
-     print_online_report(report, opts, output_file)
- 
-     # unbind
--    master.unbind_s()
-+    supplier.unbind_s()
-     replica.unbind_s()
- 
- 
-@@ -1367,18 +1367,18 @@ def init_online_params(args):
- 
-     # Make sure the URLs are different
-     if args.murl == args.rurl:
--        print("Master and Replica LDAP URLs are the same, they must be different")
-+        print("Supplier and Replica LDAP URLs are the same, they must be different")
-         sys.exit(1)
- 
--    # Parse Master url
-+    # Parse Supplier url
-     if not ldapurl.isLDAPUrl(args.murl):
--        print("Master LDAP URL is invalid")
-+        print("Supplier LDAP URL is invalid")
-         sys.exit(1)
-     murl = ldapurl.LDAPUrl(args.murl)
-     if murl.urlscheme in VALID_PROTOCOLS:
-         opts['mprotocol'] = murl.urlscheme
-     else:
--        print('Unsupported ldap url protocol (%s) for Master, please use "ldaps" or "ldap"' %
-+        print('Unsupported ldap url protocol (%s) for Supplier, please use "ldaps" or "ldap"' %
-               murl.urlscheme)
-         sys.exit(1)
- 
-@@ -1520,7 +1520,7 @@ def offline_report(args):
-             print ("LDIF file ({}) is empty".format(ldif_dir))
-             sys.exit(1)
-     if opts['mldif'] == opts['rldif']:
--        print("The Master and Replica LDIF files must be different")
-+        print("The Supplier and Replica LDIF files must be different")
-         sys.exit(1)
- 
-     OUTPUT_FILE = None
-@@ -1547,7 +1547,7 @@ def get_state(args):
-     """Just do the RUV comparision
-     """
-     opts = init_online_params(args)
--    master, replica, opts = connect_to_replicas(opts)
-+    supplier, replica, opts = connect_to_replicas(opts)
-     print(get_ruv_state(opts))
- 
- 
-@@ -1569,10 +1569,10 @@ def main():
-     # Get state
-     state_parser = subparsers.add_parser('state', help="Get the current replicaton state between two replicas")
-     state_parser.set_defaults(func=get_state)
--    state_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server',
--                               dest='murl', default=None, required=True)
-+    state_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server',
-+                              dest='murl', default=None, required=True)
-     state_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server',
--                               dest='rurl', required=True, default=None)
-+                              dest='rurl', required=True, default=None)
-     state_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
-     state_parser.add_argument('-D', '--bind-dn', help='The Bind DN', required=True, dest='binddn', default=None)
-     state_parser.add_argument('-w', '--bind-pw', help='The Bind password', dest='bindpw', default=None)
-@@ -1586,7 +1586,7 @@ def main():
-     # Online mode
-     online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
-     online_parser.set_defaults(func=online_report)
--    online_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server (REQUIRED)',
-+    online_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server (REQUIRED)',
-                                dest='murl', default=None, required=True)
-     online_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server (REQUIRED)',
-                                dest='rurl', required=True, default=None)
-@@ -1612,12 +1612,12 @@ def main():
-     # Offline LDIF mode
-     offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
-     offline_parser.set_defaults(func=offline_report)
--    offline_parser.add_argument('-m', '--master-ldif', help='Master LDIF file',
-+    offline_parser.add_argument('-m', '--supplier-ldif', help='Supplier LDIF file',
-                                 dest='mldif', default=None, required=True)
-     offline_parser.add_argument('-r', '--replica-ldif', help='Replica LDIF file',
-                                 dest='rldif', default=None, required=True)
-     offline_parser.add_argument('--rid', dest='rid', default=None, required=True,
--                                help='The Replica Identifer (rid) for the "Master" server')
-+                                help='The Replica Identifier (rid) for the "Supplier" server')
-     offline_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
-     offline_parser.add_argument('-c', '--conflicts', help='Display verbose conflict information', action='store_true',
-                                 dest='conflicts', default=False)
--- 
-2.31.1
-
diff --git a/SOURCES/0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch b/SOURCES/0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch
deleted file mode 100644
index 3fd6f16..0000000
--- a/SOURCES/0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch
+++ /dev/null
@@ -1,373 +0,0 @@
-From 55a47c1bfe1ce1c27e470384c4f1d50895db25f7 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Tue, 13 Jul 2021 14:18:03 -0400
-Subject: [PATCH] Issue 4443 - Internal unindexed searches in syncrepl/retro
- changelog
-
-Bug Description:
-
-When a non-system index is added to a backend it is
-disabled until the database is initialized or reindexed.
-So in the case of the retro changelog the changenumber index
-is alway disabled by default since it is never initialized.
-This leads to unexpected unindexed searches of the retro
-changelog.
-
-Fix Description:
-
-If an index has "nsSystemIndex" set to "true" then enable it
-immediately.
-
-relates:  https://github.com/389ds/389-ds-base/issues/4443
-
-Reviewed by: spichugi & tbordaz(Thanks!!)
----
- .../tests/suites/retrocl/basic_test.py        | 53 ++++++++-------
- .../suites/retrocl/retrocl_indexing_test.py   | 68 +++++++++++++++++++
- ldap/servers/plugins/retrocl/retrocl_create.c |  2 +-
- .../slapd/back-ldbm/ldbm_index_config.c       | 25 +++++--
- src/lib389/lib389/_mapped_object.py           | 13 ++++
- 5 files changed, 130 insertions(+), 31 deletions(-)
- create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
-
-diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
-index f3bc50f29..84d513829 100644
---- a/dirsrvtests/tests/suites/retrocl/basic_test.py
-+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
-@@ -8,7 +8,6 @@
- 
- import logging
- import ldap
--import time
- import pytest
- from lib389.topologies import topology_st
- from lib389.plugins import RetroChangelogPlugin
-@@ -18,7 +17,8 @@ from lib389.tasks import *
- from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
- from lib389.cli_base.dsrc import dsrc_arg_concat
- from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
--from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
-+from lib389.idm.user import UserAccount, UserAccounts
-+from lib389._mapped_object import DSLdapObjects
- 
- pytestmark = pytest.mark.tier1
- 
-@@ -82,7 +82,7 @@ def test_retrocl_exclude_attr_add(topology_st):
- 
-     log.info('Adding user1')
-     try:
--        user1 = users.create(properties={
-+        users.create(properties={
-             'sn': '1',
-             'cn': 'user 1',
-             'uid': 'user1',
-@@ -97,17 +97,18 @@ def test_retrocl_exclude_attr_add(topology_st):
-     except ldap.ALREADY_EXISTS:
-         pass
-     except ldap.LDAPError as e:
--        log.error("Failed to add user1")
-+        log.error("Failed to add user1: " + str(e))
- 
-     log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
-     try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
-+        retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
-+        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
-     except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
-+        log.fatal("Changelog search failed, error: " + str(e))
-         assert False
-     assert len(cllist) > 0
--    if  cllist[0].hasAttr('changes'):
--        clstr = (cllist[0].getValue('changes')).decode()
-+    if  cllist[0].present('changes'):
-+        clstr = str(cllist[0].get_attr_vals_utf8('changes'))
-         assert ATTR_HOMEPHONE in clstr
-         assert ATTR_CARLICENSE in clstr
- 
-@@ -134,7 +135,7 @@ def test_retrocl_exclude_attr_add(topology_st):
- 
-     log.info('Adding user2')
-     try:
--        user2 = users.create(properties={
-+        users.create(properties={
-             'sn': '2',
-             'cn': 'user 2',
-             'uid': 'user2',
-@@ -149,18 +150,18 @@ def test_retrocl_exclude_attr_add(topology_st):
-     except ldap.ALREADY_EXISTS:
-         pass
-     except ldap.LDAPError as e:
--        log.error("Failed to add user2")
-+        log.error("Failed to add user2: " + str(e))
- 
-     log.info('Verify homePhone attr is not in the changelog changestring')
-     try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
-+        cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})')
-         assert len(cllist) > 0
--        if  cllist[0].hasAttr('changes'):
--            clstr = (cllist[0].getValue('changes')).decode()
-+        if  cllist[0].present('changes'):
-+            clstr = str(cllist[0].get_attr_vals_utf8('changes'))
-             assert ATTR_HOMEPHONE not in clstr
-             assert ATTR_CARLICENSE in clstr
-     except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
-+        log.fatal("Changelog search failed, error: " + str(e))
-         assert False
- 
- def test_retrocl_exclude_attr_mod(topology_st):
-@@ -228,19 +229,20 @@ def test_retrocl_exclude_attr_mod(topology_st):
-             'homeDirectory': '/home/user1',
-             'userpassword': USER_PW})
-     except ldap.ALREADY_EXISTS:
--        pass
-+        user1 = UserAccount(st, dn=USER1_DN)
-     except ldap.LDAPError as e:
--        log.error("Failed to add user1")
-+        log.error("Failed to add user1: " + str(e))
- 
-     log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
-     try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
-+        retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
-+        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
-     except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
-+        log.fatal("Changelog search failed, error: " + str(e))
-         assert False
-     assert len(cllist) > 0
--    if  cllist[0].hasAttr('changes'):
--        clstr = (cllist[0].getValue('changes')).decode()
-+    if  cllist[0].present('changes'):
-+        clstr = str(cllist[0].get_attr_vals_utf8('changes'))
-         assert ATTR_HOMEPHONE in clstr
-         assert ATTR_CARLICENSE in clstr
- 
-@@ -267,24 +269,25 @@ def test_retrocl_exclude_attr_mod(topology_st):
- 
-     log.info('Modify user1 carLicense attribute')
-     try:
--        st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
-+        user1.replace(ATTR_CARLICENSE, "123WX321")
-     except ldap.LDAPError as e:
-         log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
-         assert False
- 
-     log.info('Verify carLicense attr is not in the changelog changestring')
-     try:
--        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
-+        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
-         assert len(cllist) > 0
-         # There will be 2 entries in the changelog for this user, we are only
-         #interested in the second one, the modify operation.
--        if  cllist[1].hasAttr('changes'):
--            clstr = (cllist[1].getValue('changes')).decode()
-+        if  cllist[1].present('changes'):
-+            clstr = str(cllist[1].get_attr_vals_utf8('changes'))
-             assert ATTR_CARLICENSE not in clstr
-     except ldap.LDAPError as e:
--        log.fatal("Changelog search failed, error: " +str(e))
-+        log.fatal("Changelog search failed, error: " + str(e))
-         assert False
- 
-+
- if __name__ == '__main__':
-     # Run isolated
-     # -s for DEBUG mode
-diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
-new file mode 100644
-index 000000000..b1dfe962c
---- /dev/null
-+++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
-@@ -0,0 +1,68 @@
-+import logging
-+import pytest
-+import os
-+from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX
-+from lib389.topologies import topology_st as topo
-+from lib389.plugins import RetroChangelogPlugin
-+from lib389.idm.user import UserAccounts
-+from lib389._mapped_object import DSLdapObjects
-+log = logging.getLogger(__name__)
-+
-+
-+def test_indexing_is_online(topo):
-+    """Test that the changenmumber index is online right after enabling the plugin
-+
-+    :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f
-+    :setup: Standalone Instance
-+    :steps:
-+        1. Enable retro cl
-+        2. Perform some updates
-+        3. Search for "(changenumber>=-1)", and it is not partially unindexed
-+        4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed
-+    :expectedresults:
-+        1. Success
-+        2. Success
-+        3. Success
-+        4. Success
-+    """
-+
-+    # Enable plugin
-+    topo.standalone.config.set('nsslapd-accesslog-logbuffering',  'off')
-+    plugin = RetroChangelogPlugin(topo.standalone)
-+    plugin.enable()
-+    topo.standalone.restart()
-+
-+    # Do a bunch of updates
-+    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
-+    user_entry = users.create(properties={
-+        'sn': '1',
-+        'cn': 'user 1',
-+        'uid': 'user1',
-+        'uidNumber': '11',
-+        'gidNumber': '111',
-+        'givenname': 'user1',
-+        'homePhone': '0861234567',
-+        'carLicense': '131D16674',
-+        'mail': 'user1@whereever.com',
-+        'homeDirectory': '/home'
-+    })
-+    for count in range(0, 10):
-+        user_entry.replace('mail', f'test{count}@test.com')
-+
-+    # Search the retro cl, and check for error messages
-+    filter_simple = '(changenumber>=-1)'
-+    filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))'
-+    retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX)
-+    retro_changelog_suffix.filter(filter_simple)
-+    assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
-+
-+    # Search the retro cl again with compound filter
-+    retro_changelog_suffix.filter(filter_compound)
-+    assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
-+
-+
-+if __name__ == '__main__':
-+    # Run isolated
-+    # -s for DEBUG mode
-+    CURRENT_FILE = os.path.realpath(__file__)
-+    pytest.main(["-s", CURRENT_FILE])
-diff --git a/ldap/servers/plugins/retrocl/retrocl_create.c b/ldap/servers/plugins/retrocl/retrocl_create.c
-index 571e6899f..5bfde7831 100644
---- a/ldap/servers/plugins/retrocl/retrocl_create.c
-+++ b/ldap/servers/plugins/retrocl/retrocl_create.c
-@@ -133,7 +133,7 @@ retrocl_create_be(const char *bedir)
-     val.bv_len = strlen(val.bv_val);
-     slapi_entry_add_values(e, "cn", vals);
- 
--    val.bv_val = "false";
-+    val.bv_val = "true"; /* enables the index */
-     val.bv_len = strlen(val.bv_val);
-     slapi_entry_add_values(e, "nssystemindex", vals);
- 
-diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
-index 9722d0ce7..38e7368e1 100644
---- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
-+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
-@@ -25,7 +25,7 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
- #define INDEXTYPE_NONE 1
- 
- static int
--ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
-+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, PRBool *is_system_index, char *err_buf)
- {
-     Slapi_Attr *attr;
-     const struct berval *attrValue;
-@@ -78,6 +78,15 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
-         }
-     }
- 
-+    *is_system_index = PR_FALSE;
-+    if (0 == slapi_entry_attr_find(e, "nsSystemIndex", &attr)) {
-+        slapi_attr_first_value(attr, &sval);
-+        attrValue = slapi_value_get_berval(sval);
-+        if (strcasecmp(attrValue->bv_val, "true") == 0) {
-+            *is_system_index = PR_TRUE;
-+        }
-+    }
-+
-     /* ok the entry is good to process, pass it to attr_index_config */
-     if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
-         slapi_ch_free_string(index_name);
-@@ -101,9 +110,10 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
-                                void *arg)
- {
-     ldbm_instance *inst = (ldbm_instance *)arg;
-+    PRBool is_system_index = PR_FALSE;
- 
-     returntext[0] = '\0';
--    *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
-+    *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, &is_system_index /* not used */, NULL);
-     if (*returncode == LDAP_SUCCESS) {
-         return SLAPI_DSE_CALLBACK_OK;
-     } else {
-@@ -126,17 +136,21 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
- {
-     ldbm_instance *inst = (ldbm_instance *)arg;
-     char *index_name = NULL;
-+    PRBool is_system_index = PR_FALSE;
- 
-     returntext[0] = '\0';
--    *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
-+    *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index, returntext);
-     if (*returncode == LDAP_SUCCESS) {
-         struct attrinfo *ai = NULL;
-         /* if the index is a "system" index, we assume it's being added by
-          * by the server, and it's okay for the index to go online immediately.
-          * if not, we set the index "offline" so it won't actually be used
-          * until someone runs db2index on it.
-+         * If caller wants to add an index that they want to be online
-+         * immediately they can also set "nsSystemIndex" to "true" in the
-+         * index config entry (e.g. is_system_index).
-          */
--        if (!ldbm_attribute_always_indexed(index_name)) {
-+        if (!is_system_index && !ldbm_attribute_always_indexed(index_name)) {
-             ainfo_get(inst->inst_be, index_name, &ai);
-             PR_ASSERT(ai != NULL);
-             ai->ai_indexmask |= INDEX_OFFLINE;
-@@ -386,13 +400,14 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
-     char *index_name = NULL;
-     int rc = LDAP_SUCCESS;
-     struct attrinfo *ai = NULL;
-+    PRBool is_system_index = PR_FALSE;
- 
-     index_name = slapi_entry_attr_get_charptr(e, "cn");
-     if (index_name) {
-         ainfo_get(inst->inst_be, index_name, &ai);
-     }
-     if (!ai) {
--        rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
-+        rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index /* not used */, NULL);
-     }
-     if (rc == LDAP_SUCCESS) {
-         /* Assume the caller knows if it is OK to go online immediately */
-diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
-index b6d778b01..fe610d175 100644
---- a/src/lib389/lib389/_mapped_object.py
-+++ b/src/lib389/lib389/_mapped_object.py
-@@ -148,6 +148,19 @@ class DSLdapObject(DSLogging, DSLint):
- 
-         return True
- 
-+    def search(self, scope="subtree", filter='objectclass=*'):
-+        search_scope = ldap.SCOPE_SUBTREE
-+        if scope == 'base':
-+            search_scope = ldap.SCOPE_BASE
-+        elif scope == 'one':
-+            search_scope = ldap.SCOPE_ONE
-+        elif scope == 'subtree':
-+            search_scope = ldap.SCOPE_SUBTREE
-+        return self._instance.search_ext_s(self._dn, search_scope, filter,
-+                                           serverctrls=self._server_controls,
-+                                           clientctrls=self._client_controls,
-+                                           escapehatch='i am sure')
-+
-     def display(self, attrlist=['*']):
-         """Get an entry but represent it as a string LDIF
- 
--- 
-2.31.1
-
diff --git a/SOURCES/0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch b/SOURCES/0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
deleted file mode 100644
index 32c0eb1..0000000
--- a/SOURCES/0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From 2f0218f91d35c83a2aaecb71849a54b2481390ab Mon Sep 17 00:00:00 2001
-From: Firstyear <william@blackhats.net.au>
-Date: Fri, 9 Jul 2021 11:53:35 +1000
-Subject: [PATCH] Issue 4817 - BUG - locked crypt accounts on import may allow
- all passwords (#4819)
-
-Bug Description: Due to mishanding of short dbpwd hashes, the
-crypt_r algorithm was misused and was only comparing salts
-in some cases, rather than checking the actual content
-of the password.
-
-Fix Description: Stricter checks on dbpwd lengths to ensure
-that content passed to crypt_r has at least 2 salt bytes and
-1 hash byte, as well as stricter checks on ct_memcmp to ensure
-that compared values are the same length, rather than potentially
-allowing overruns/short comparisons.
-
-fixes: https://github.com/389ds/389-ds-base/issues/4817
-
-Author: William Brown <william@blackhats.net.au>
-
-Review by: @mreynolds389
----
- .../password/pwd_crypt_asterisk_test.py       | 50 +++++++++++++++++++
- ldap/servers/plugins/pwdstorage/crypt_pwd.c   | 20 +++++---
- 2 files changed, 64 insertions(+), 6 deletions(-)
- create mode 100644 dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
-
-diff --git a/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
-new file mode 100644
-index 000000000..d76614db1
---- /dev/null
-+++ b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
-@@ -0,0 +1,50 @@
-+# --- BEGIN COPYRIGHT BLOCK ---
-+# Copyright (C) 2021 William Brown <william@blackhats.net.au>
-+# All rights reserved.
-+#
-+# License: GPL (version 3 or any later version).
-+# See LICENSE for details.
-+# --- END COPYRIGHT BLOCK ---
-+#
-+import ldap
-+import pytest
-+from lib389.topologies import topology_st
-+from lib389.idm.user import UserAccounts
-+from lib389._constants import (DEFAULT_SUFFIX, PASSWORD)
-+
-+pytestmark = pytest.mark.tier1
-+
-+def test_password_crypt_asterisk_is_rejected(topology_st):
-+    """It was reported that {CRYPT}* was allowing all passwords to be
-+    valid in the bind process. This checks that we should be rejecting
-+    these as they should represent locked accounts. Similar, {CRYPT}!
-+
-+    :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3
-+    :setup: Single instance
-+    :steps: 1. Set a password hash in with CRYPT and the content *
-+            2. Test a bind
-+            3. Set a password hash in with CRYPT and the content !
-+            4. Test a bind
-+    :expectedresults:
-+            1. Successfully set the values
-+            2. The bind fails
-+            3. Successfully set the values
-+            4. The bind fails
-+    """
-+    topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on')
-+    topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off')
-+
-+    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
-+    user = users.create_test_user()
-+
-+    user.set('userPassword', "{CRYPT}*")
-+
-+    # Attempt to bind with incorrect password.
-+    with pytest.raises(ldap.INVALID_CREDENTIALS):
-+        badconn = user.bind('badpassword')
-+
-+    user.set('userPassword', "{CRYPT}!")
-+    # Attempt to bind with incorrect password.
-+    with pytest.raises(ldap.INVALID_CREDENTIALS):
-+        badconn = user.bind('badpassword')
-+
-diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
-index 9031b2199..1b37d41ed 100644
---- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c
-+++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
-@@ -48,15 +48,23 @@ static unsigned char itoa64[] = /* 0 ... 63 => ascii - 64 */
- int
- crypt_pw_cmp(const char *userpwd, const char *dbpwd)
- {
--    int rc;
--    char *cp;
-+    int rc = -1;
-+    char *cp = NULL;
-+    size_t dbpwd_len = strlen(dbpwd);
-     struct crypt_data data;
-     data.initialized = 0;
- 
--    /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
--    cp = crypt_r(userpwd, dbpwd, &data);
--    if (cp) {
--        rc = slapi_ct_memcmp(dbpwd, cp, strlen(dbpwd));
-+    /*
-+     * there MUST be at least 2 chars of salt and some pw bytes, else this is INVALID and will
-+     * allow any password to bind as we then only compare SALTS.
-+     */
-+    if (dbpwd_len >= 3) {
-+        /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
-+        cp = crypt_r(userpwd, dbpwd, &data);
-+    }
-+    /* If these are not the same length, we can not proceed safely with memcmp. */
-+    if (cp && dbpwd_len == strlen(cp)) {
-+        rc = slapi_ct_memcmp(dbpwd, cp, dbpwd_len);
-     } else {
-         rc = -1;
-     }
--- 
-2.31.1
-
diff --git a/SOURCES/0025-Issue-4837-persistent-search-returns-entries-even-wh.patch b/SOURCES/0025-Issue-4837-persistent-search-returns-entries-even-wh.patch
deleted file mode 100644
index 66643a1..0000000
--- a/SOURCES/0025-Issue-4837-persistent-search-returns-entries-even-wh.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 31d53e7da585723e66b838dcf34b77ea7c9968c6 Mon Sep 17 00:00:00 2001
-From: tbordaz <tbordaz@redhat.com>
-Date: Wed, 21 Jul 2021 09:16:30 +0200
-Subject: [PATCH] Issue 4837 - persistent search returns entries even when an
- error is returned by content-sync-plugin (#4838)
-
-Bug description:
-	When a ldap client sends a sync request control, the server response may contain a sync state control.
-        If the server fails to create the control the search should fail.
-
-Fix description:
-	In case the server fails to create the response control
-        logs the failure of the pre_search
-
-relates: https://github.com/389ds/389-ds-base/issues/4837
-
-Reviewed by: Simon Pichugin
-
-Platforms tested: RH8.4
----
- ldap/servers/plugins/sync/sync_refresh.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/ldap/servers/plugins/sync/sync_refresh.c b/ldap/servers/plugins/sync/sync_refresh.c
-index 646ff760b..4cbb6a949 100644
---- a/ldap/servers/plugins/sync/sync_refresh.c
-+++ b/ldap/servers/plugins/sync/sync_refresh.c
-@@ -213,7 +213,7 @@ sync_srch_refresh_pre_entry(Slapi_PBlock *pb)
-         Slapi_Entry *e;
-         slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e);
-         LDAPControl **ctrl = (LDAPControl **)slapi_ch_calloc(2, sizeof(LDAPControl *));
--        sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
-+        rc = sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
-         slapi_pblock_set(pb, SLAPI_SEARCH_CTRLS, ctrl);
-     }
-     return (rc);
--- 
-2.31.1
-
diff --git a/SOURCES/0026-Hardcode-gost-crypt-passsword-storage-scheme.patch b/SOURCES/0026-Hardcode-gost-crypt-passsword-storage-scheme.patch
deleted file mode 100644
index aa701a0..0000000
--- a/SOURCES/0026-Hardcode-gost-crypt-passsword-storage-scheme.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 616dc9964a4675dea2ab2c2efb9bd31c3903e29d Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Mon, 26 Jul 2021 15:22:08 -0400
-Subject: [PATCH] Hardcode gost crypt passsword storage scheme
-
----
- .../plugins/pwdstorage/gost_yescrypt.c        | 22 -------------------
- 1 file changed, 22 deletions(-)
-
-diff --git a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
-index 67b39395e..7b0d1653c 100644
---- a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
-+++ b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
-@@ -11,7 +11,6 @@
- 
- #include <crypt.h>
- 
--#ifdef XCRYPT_VERSION_STR
- #include <errno.h>
- int
- gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd)
-@@ -64,24 +63,3 @@ gost_yescrypt_pw_enc(const char *pwd)
-     return enc;
- }
- 
--#else
--
--/*
-- * We do not have xcrypt, so always fail all checks.
-- */
--int
--gost_yescrypt_pw_cmp(const char *userpwd __attribute__((unused)), const char *dbpwd __attribute__((unused)))
--{
--    slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
--                  "Unable to use gost_yescrypt_pw_cmp, xcrypt is not available.\n");
--    return 1;
--}
--
--char *
--gost_yescrypt_pw_enc(const char *pwd __attribute__((unused)))
--{
--    slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
--                  "Unable to use gost_yescrypt_pw_enc, xcrypt is not available.\n");
--    return NULL;
--}
--#endif
--- 
-2.31.1
-
diff --git a/SOURCES/0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch b/SOURCES/0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch
deleted file mode 100644
index 138ee66..0000000
--- a/SOURCES/0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From a2a51130b2f95316237b85da099a8be734969e54 Mon Sep 17 00:00:00 2001
-From: James Chapman <jachapma@redhat.com>
-Date: Sat, 24 Apr 2021 21:37:54 +0100
-Subject: [PATCH] Issue 4734 - import of entry with no parent warning (#4735)
-
-Description:    Online import of ldif file that contains an entry with
-                no parent doesnt generate a task warning.
-
-Fixes:          https://github.com/389ds/389-ds-base/issues/4734
-
-Author: vashirov@redhat.com (Thanks)
-
-Reviewed by: mreynolds, jchapma
----
- ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
-index 905a84e74..35183ed59 100644
---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
-+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
-@@ -2767,8 +2767,14 @@ import_foreman(void *param)
-         if (job->flags & FLAG_ABORT) {
-             goto error;
-         }
-+
-+        /* capture skipped entry warnings for this task */
-+        if((job) && (job->skipped)) {
-+            slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
-+        }
-     }
- 
-+
-     slapi_pblock_destroy(pb);
-     info->state = FINISHED;
-     return;
--- 
-2.31.1
-
diff --git a/SOURCES/0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch b/SOURCES/0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch
deleted file mode 100644
index a9d5958..0000000
--- a/SOURCES/0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From f9bc249b2baa11a8ac0eb54e4077eb706d137e38 Mon Sep 17 00:00:00 2001
-From: Firstyear <william@blackhats.net.au>
-Date: Thu, 19 Aug 2021 11:06:06 +1000
-Subject: [PATCH] Issue 4872 - BUG - entryuuid enabled by default causes
- replication issues (#4876)
-
-Bug Description: Due to older servers missing the syntax
-plugin this breaks schema replication and causes cascading
-errors.
-
-Fix Description: This changes the syntax to be a case
-insensitive string, while leaving the plugins in place
-for other usage.
-
-fixes: https://github.com/389ds/389-ds-base/issues/4872
-
-Author: William Brown <william@blackhats.net.au>
-
-Review by: @mreynolds389 @progier389
----
- ldap/schema/03entryuuid.ldif | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif
-index cbde981fe..f7a7f40d5 100644
---- a/ldap/schema/03entryuuid.ldif
-+++ b/ldap/schema/03entryuuid.ldif
-@@ -13,4 +13,5 @@ dn: cn=schema
- #
- # attributes
- #
--attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
-+# attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
-+attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
--- 
-2.31.1
-
diff --git a/SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch b/SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
deleted file mode 100644
index 7b74019..0000000
--- a/SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
+++ /dev/null
@@ -1,125 +0,0 @@
-From 120511d35095a48d60abbb7cb2367d0c30fbc757 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Wed, 25 Aug 2021 13:20:56 -0400
-Subject: [PATCH] Remove GOST-YESCRYPT password sotrage scheme
-
----
- .../tests/suites/password/pwd_algo_test.py     |  1 -
- ldap/ldif/template-dse-minimal.ldif.in         |  9 ---------
- ldap/ldif/template-dse.ldif.in                 |  9 ---------
- ldap/servers/plugins/pwdstorage/pwd_init.c     | 18 ------------------
- ldap/servers/slapd/fedse.c                     | 13 -------------
- 5 files changed, 50 deletions(-)
-
-diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
-index 66bda420e..88f8e40b7 100644
---- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
-+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
-@@ -124,7 +124,6 @@ def _test_algo_for_pbkdf2(inst, algo_name):
-     ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512',
-      'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA',
-      'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',
--     'GOST_YESCRYPT',
-      ))
- def test_pwd_algo_test(topology_st, algo):
-     """Assert that all of our password algorithms correctly PASS and FAIL varying
-diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in
-index 2eccae9b2..1a05f4a67 100644
---- a/ldap/ldif/template-dse-minimal.ldif.in
-+++ b/ldap/ldif/template-dse-minimal.ldif.in
-@@ -194,15 +194,6 @@ nsslapd-pluginarg1: nsds5ReplicaCredentials
- nsslapd-pluginid: aes-storage-scheme
- nsslapd-pluginprecedence: 1
- 
--dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
--objectclass: top
--objectclass: nsSlapdPlugin
--cn: GOST_YESCRYPT
--nsslapd-pluginpath: libpwdstorage-plugin
--nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
--nsslapd-plugintype: pwdstoragescheme
--nsslapd-pluginenabled: on
--
- dn: cn=Syntax Validation Task,cn=plugins,cn=config
- objectclass: top
- objectclass: nsSlapdPlugin
-diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
-index 7e7480cba..f30531bec 100644
---- a/ldap/ldif/template-dse.ldif.in
-+++ b/ldap/ldif/template-dse.ldif.in
-@@ -242,15 +242,6 @@ nsslapd-pluginarg2: nsds5ReplicaBootstrapCredentials
- nsslapd-pluginid: aes-storage-scheme
- nsslapd-pluginprecedence: 1
- 
--dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
--objectclass: top
--objectclass: nsSlapdPlugin
--cn: GOST_YESCRYPT
--nsslapd-pluginpath: libpwdstorage-plugin
--nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
--nsslapd-plugintype: pwdstoragescheme
--nsslapd-pluginenabled: on
--
- dn: cn=Syntax Validation Task,cn=plugins,cn=config
- objectclass: top
- objectclass: nsSlapdPlugin
-diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
-index 606e63404..59cfc4684 100644
---- a/ldap/servers/plugins/pwdstorage/pwd_init.c
-+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
-@@ -52,8 +52,6 @@ static Slapi_PluginDesc smd5_pdesc = {"smd5-password-storage-scheme", VENDOR, DS
- 
- static Slapi_PluginDesc pbkdf2_sha256_pdesc = {"pbkdf2-sha256-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Salted PBKDF2 SHA256 hash algorithm (PBKDF2_SHA256)"};
- 
--static Slapi_PluginDesc gost_yescrypt_pdesc = {"gost-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Yescrypt KDF algorithm (Streebog256)"};
--
- static char *plugin_name = "NSPwdStoragePlugin";
- 
- int
-@@ -431,19 +429,3 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
-     return rc;
- }
- 
--int
--gost_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb)
--{
--    int rc;
--
--    slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> gost_yescrypt_pwd_storage_scheme_init\n");
--
--    rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01);
--    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&gost_yescrypt_pdesc);
--    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *)gost_yescrypt_pw_enc);
--    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)gost_yescrypt_pw_cmp);
--    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, GOST_YESCRYPT_SCHEME_NAME);
--
--    slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= gost_yescrypt_pwd_storage_scheme_init %d\n", rc);
--    return rc;
--}
-diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
-index 44159c991..24b7ed11c 100644
---- a/ldap/servers/slapd/fedse.c
-+++ b/ldap/servers/slapd/fedse.c
-@@ -203,19 +203,6 @@ static const char *internal_entries[] =
-         "nsslapd-pluginVersion: none\n"
-         "nsslapd-pluginVendor: 389 Project\n"
-         "nsslapd-pluginDescription: CRYPT-SHA512\n",
--
--        "dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
--        "objectclass: top\n"
--        "objectclass: nsSlapdPlugin\n"
--        "cn: GOST_YESCRYPT\n"
--        "nsslapd-pluginpath: libpwdstorage-plugin\n"
--        "nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init\n"
--        "nsslapd-plugintype: pwdstoragescheme\n"
--        "nsslapd-pluginenabled: on\n"
--        "nsslapd-pluginId: GOST_YESCRYPT\n"
--        "nsslapd-pluginVersion: none\n"
--        "nsslapd-pluginVendor: 389 Project\n"
--        "nsslapd-pluginDescription: GOST_YESCRYPT\n",
- };
- 
- static int NUM_INTERNAL_ENTRIES = sizeof(internal_entries) / sizeof(internal_entries[0]);
--- 
-2.31.1
-
diff --git a/SOURCES/0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch b/SOURCES/0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch
deleted file mode 100644
index 332394c..0000000
--- a/SOURCES/0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From df0ccce06259b9ef06d522e61da4e3ffcbbf5016 Mon Sep 17 00:00:00 2001
-From: Mark Reynolds <mreynolds@redhat.com>
-Date: Wed, 25 Aug 2021 16:54:57 -0400
-Subject: [PATCH] Issue 4884 - server crashes when dnaInterval attribute is set
- to zero
-
-Bug Description:
-
-A division by zero crash occurs if the dnaInterval is set to zero
-
-Fix Description:
-
-Validate the config value of dnaInterval and adjust it to the
-default/safe value of "1" if needed.
-
-relates: https://github.com/389ds/389-ds-base/issues/4884
-
-Reviewed by: tbordaz(Thanks!)
----
- ldap/servers/plugins/dna/dna.c | 7 +++++++
- 1 file changed, 7 insertions(+)
-
-diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
-index 928a3f54a..c983ebdd0 100644
---- a/ldap/servers/plugins/dna/dna.c
-+++ b/ldap/servers/plugins/dna/dna.c
-@@ -1025,7 +1025,14 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
- 
-     value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
-     if (value) {
-+        errno = 0;
-         entry->interval = strtoull(value, 0, 0);
-+        if (entry->interval == 0 || errno == ERANGE) {
-+            slapi_log_err(SLAPI_LOG_WARNING, DNA_PLUGIN_SUBSYSTEM,
-+                          "dna_parse_config_entry - Invalid value for dnaInterval (%s), "
-+                          "Using default value of 1\n", value);
-+            entry->interval = 1;
-+        }
-         slapi_ch_free_string(&value);
-     }
- 
--- 
-2.31.1
-
diff --git a/SOURCES/Cargo.lock b/SOURCES/Cargo.lock
index 1127ca0..4c77f19 100644
--- a/SOURCES/Cargo.lock
+++ b/SOURCES/Cargo.lock
@@ -36,9 +36,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
 
 [[package]]
 name = "bitflags"
-version = "1.2.1"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
 name = "byteorder"
@@ -65,9 +65,9 @@ dependencies = [
 
 [[package]]
 name = "cc"
-version = "1.0.68"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
+checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd"
 dependencies = [
  "jobserver",
 ]
@@ -156,24 +156,24 @@ dependencies = [
 
 [[package]]
 name = "hermit-abi"
-version = "0.1.18"
+version = "0.1.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
 dependencies = [
  "libc",
 ]
 
 [[package]]
 name = "itoa"
-version = "0.4.7"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
+checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
 
 [[package]]
 name = "jobserver"
-version = "0.1.22"
+version = "0.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
+checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa"
 dependencies = [
  "libc",
 ]
@@ -186,9 +186,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
 name = "libc"
-version = "0.2.95"
+version = "0.2.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36"
+checksum = "7b2f96d100e1cf1929e7719b7edb3b90ab5298072638fccd77be9ce942ecdfce"
 
 [[package]]
 name = "librnsslapd"
@@ -219,15 +219,15 @@ dependencies = [
 
 [[package]]
 name = "once_cell"
-version = "1.7.2"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
+checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
 
 [[package]]
 name = "openssl"
-version = "0.10.34"
+version = "0.10.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
+checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a"
 dependencies = [
  "bitflags",
  "cfg-if",
@@ -239,9 +239,9 @@ dependencies = [
 
 [[package]]
 name = "openssl-sys"
-version = "0.9.63"
+version = "0.9.67"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
+checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058"
 dependencies = [
  "autocfg",
  "cc",
@@ -271,15 +271,15 @@ dependencies = [
 
 [[package]]
 name = "pkg-config"
-version = "0.3.19"
+version = "0.3.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
+checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb"
 
 [[package]]
 name = "ppv-lite86"
-version = "0.2.10"
+version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
+checksum = "c3ca011bd0129ff4ae15cd04c4eef202cadf6c51c21e47aba319b4e0501db741"
 
 [[package]]
 name = "proc-macro-hack"
@@ -289,27 +289,27 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.27"
+version = "1.0.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
+checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70"
 dependencies = [
  "unicode-xid",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.9"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
+checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05"
 dependencies = [
  "proc-macro2",
 ]
 
 [[package]]
 name = "rand"
-version = "0.8.3"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
 dependencies = [
  "libc",
  "rand_chacha",
@@ -319,9 +319,9 @@ dependencies = [
 
 [[package]]
 name = "rand_chacha"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
 dependencies = [
  "ppv-lite86",
  "rand_core",
@@ -329,27 +329,27 @@ dependencies = [
 
 [[package]]
 name = "rand_core"
-version = "0.6.2"
+version = "0.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
 dependencies = [
  "getrandom",
 ]
 
 [[package]]
 name = "rand_hc"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
 dependencies = [
  "rand_core",
 ]
 
 [[package]]
 name = "redox_syscall"
-version = "0.2.8"
+version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
+checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
 dependencies = [
  "bitflags",
 ]
@@ -375,18 +375,18 @@ checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
 
 [[package]]
 name = "serde"
-version = "1.0.126"
+version = "1.0.130"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
+checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.126"
+version = "1.0.130"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
+checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -395,9 +395,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.64"
+version = "1.0.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
+checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8"
 dependencies = [
  "itoa",
  "ryu",
@@ -429,9 +429,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
 
 [[package]]
 name = "syn"
-version = "1.0.72"
+version = "1.0.80"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
+checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -440,9 +440,9 @@ dependencies = [
 
 [[package]]
 name = "synstructure"
-version = "0.12.4"
+version = "0.12.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
+checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -484,9 +484,9 @@ dependencies = [
 
 [[package]]
 name = "unicode-width"
-version = "0.1.8"
+version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
 
 [[package]]
 name = "unicode-xid"
@@ -505,9 +505,9 @@ dependencies = [
 
 [[package]]
 name = "vcpkg"
-version = "0.2.13"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
 
 [[package]]
 name = "vec_map"
@@ -545,18 +545,18 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
 name = "zeroize"
-version = "1.3.0"
+version = "1.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
+checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970"
 dependencies = [
  "zeroize_derive",
 ]
 
 [[package]]
 name = "zeroize_derive"
-version = "1.1.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
+checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index bd2daeb..3180aed 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -47,8 +47,8 @@ ExcludeArch: i686
 
 Summary:          389 Directory Server (base)
 Name:             389-ds-base
-Version:          1.4.3.23
-Release:          %{?relprefix}10%{?prerel}%{?dist}
+Version:          1.4.3.28
+Release:          %{?relprefix}6%{?prerel}%{?dist}
 License:          GPLv3+
 URL:              https://www.port389.org
 Group:            System Environment/Daemons
@@ -61,56 +61,67 @@ Provides:         ldif2ldbm >= 0
 Provides:  bundled(crate(ansi_term)) = 0.11.0
 Provides:  bundled(crate(atty)) = 0.2.14
 Provides:  bundled(crate(autocfg)) = 1.0.1
-Provides:  bundled(crate(base64)) = 0.10.1
-Provides:  bundled(crate(bitflags)) = 1.2.1
-Provides:  bundled(crate(byteorder)) = 1.4.2
+Provides:  bundled(crate(base64)) = 0.13.0
+Provides:  bundled(crate(bitflags)) = 1.3.2
+Provides:  bundled(crate(byteorder)) = 1.4.3
 Provides:  bundled(crate(cbindgen)) = 0.9.1
-Provides:  bundled(crate(cc)) = 1.0.66
-Provides:  bundled(crate(cfg-if)) = 0.1.10
+Provides:  bundled(crate(cc)) = 1.0.71
 Provides:  bundled(crate(cfg-if)) = 1.0.0
 Provides:  bundled(crate(clap)) = 2.33.3
-Provides:  bundled(crate(fernet)) = 0.1.3
+Provides:  bundled(crate(entryuuid)) = 0.1.0
+Provides:  bundled(crate(entryuuid_syntax)) = 0.1.0
+Provides:  bundled(crate(fernet)) = 0.1.4
 Provides:  bundled(crate(foreign-types)) = 0.3.2
 Provides:  bundled(crate(foreign-types-shared)) = 0.1.1
-Provides:  bundled(crate(getrandom)) = 0.1.16
-Provides:  bundled(crate(hermit-abi)) = 0.1.17
-Provides:  bundled(crate(itoa)) = 0.4.7
+Provides:  bundled(crate(getrandom)) = 0.2.3
+Provides:  bundled(crate(hermit-abi)) = 0.1.19
+Provides:  bundled(crate(itoa)) = 0.4.8
+Provides:  bundled(crate(jobserver)) = 0.1.24
 Provides:  bundled(crate(lazy_static)) = 1.4.0
-Provides:  bundled(crate(libc)) = 0.2.82
+Provides:  bundled(crate(libc)) = 0.2.104
 Provides:  bundled(crate(librnsslapd)) = 0.1.0
 Provides:  bundled(crate(librslapd)) = 0.1.0
-Provides:  bundled(crate(log)) = 0.4.11
-Provides:  bundled(crate(openssl)) = 0.10.32
-Provides:  bundled(crate(openssl-sys)) = 0.9.60
-Provides:  bundled(crate(pkg-config)) = 0.3.19
-Provides:  bundled(crate(ppv-lite86)) = 0.2.10
-Provides:  bundled(crate(proc-macro2)) = 1.0.24
-Provides:  bundled(crate(quote)) = 1.0.8
-Provides:  bundled(crate(rand)) = 0.7.3
-Provides:  bundled(crate(rand_chacha)) = 0.2.2
-Provides:  bundled(crate(rand_core)) = 0.5.1
-Provides:  bundled(crate(rand_hc)) = 0.2.0
-Provides:  bundled(crate(redox_syscall)) = 0.1.57
+Provides:  bundled(crate(log)) = 0.4.14
+Provides:  bundled(crate(once_cell)) = 1.8.0
+Provides:  bundled(crate(openssl)) = 0.10.36
+Provides:  bundled(crate(openssl-sys)) = 0.9.67
+Provides:  bundled(crate(paste)) = 0.1.18
+Provides:  bundled(crate(paste-impl)) = 0.1.18
+Provides:  bundled(crate(pkg-config)) = 0.3.20
+Provides:  bundled(crate(ppv-lite86)) = 0.2.14
+Provides:  bundled(crate(proc-macro-hack)) = 0.5.19
+Provides:  bundled(crate(proc-macro2)) = 1.0.30
+Provides:  bundled(crate(quote)) = 1.0.10
+Provides:  bundled(crate(rand)) = 0.8.4
+Provides:  bundled(crate(rand_chacha)) = 0.3.1
+Provides:  bundled(crate(rand_core)) = 0.6.3
+Provides:  bundled(crate(rand_hc)) = 0.3.1
+Provides:  bundled(crate(redox_syscall)) = 0.2.10
 Provides:  bundled(crate(remove_dir_all)) = 0.5.3
 Provides:  bundled(crate(rsds)) = 0.1.0
 Provides:  bundled(crate(ryu)) = 1.0.5
-Provides:  bundled(crate(serde)) = 1.0.118
-Provides:  bundled(crate(serde_derive)) = 1.0.118
-Provides:  bundled(crate(serde_json)) = 1.0.61
+Provides:  bundled(crate(serde)) = 1.0.130
+Provides:  bundled(crate(serde_derive)) = 1.0.130
+Provides:  bundled(crate(serde_json)) = 1.0.68
 Provides:  bundled(crate(slapd)) = 0.1.0
+Provides:  bundled(crate(slapi_r_plugin)) = 0.1.0
 Provides:  bundled(crate(strsim)) = 0.8.0
-Provides:  bundled(crate(syn)) = 1.0.58
-Provides:  bundled(crate(tempfile)) = 3.1.0
+Provides:  bundled(crate(syn)) = 1.0.80
+Provides:  bundled(crate(synstructure)) = 0.12.6
+Provides:  bundled(crate(tempfile)) = 3.2.0
 Provides:  bundled(crate(textwrap)) = 0.11.0
 Provides:  bundled(crate(toml)) = 0.5.8
-Provides:  bundled(crate(unicode-width)) = 0.1.8
-Provides:  bundled(crate(unicode-xid)) = 0.2.1
-Provides:  bundled(crate(vcpkg)) = 0.2.11
+Provides:  bundled(crate(unicode-width)) = 0.1.9
+Provides:  bundled(crate(unicode-xid)) = 0.2.2
+Provides:  bundled(crate(uuid)) = 0.8.2
+Provides:  bundled(crate(vcpkg)) = 0.2.15
 Provides:  bundled(crate(vec_map)) = 0.8.2
-Provides:  bundled(crate(wasi)) = 0.9.0+wasi_snapshot_preview1
+Provides:  bundled(crate(wasi)) = 0.10.2+wasi_snapshot_preview1
 Provides:  bundled(crate(winapi)) = 0.3.9
 Provides:  bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
 Provides:  bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
+Provides:  bundled(crate(zeroize)) = 1.4.2
+Provides:  bundled(crate(zeroize_derive)) = 1.2.0
 ##### Bundled cargo crates list - END #####
 
 BuildRequires:    nspr-devel
@@ -234,40 +245,27 @@ Source2:          %{name}-devel.README
 Source3:          https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
 %endif
 %if %{use_rust}
-Source4:          vendor-%{version}-2.tar.gz
+Source4:          vendor-%{version}-1.tar.gz
 Source5:          Cargo.lock
 %endif
-Patch01:          0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch
-Patch02:          0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch
-Patch03:          0003-Ticket-137-Implement-EntryUUID-plugin.patch
-Patch04:          0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
-Patch05:          0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch
-Patch06:          0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch
-Patch07:          0007-Ticket-51175-resolve-plugin-name-leaking.patch
-Patch08:          0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch
-Patch09:          0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch
-Patch10:          0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
-Patch11:          0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch
-Patch12:          0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch
-Patch13:          0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
-Patch14:          0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch
-Patch15:          0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch
-Patch16:          0016-Issue-4725-Fix-compiler-warnings.patch
-Patch17:          0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch
-Patch18:          0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch
-Patch19:          0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch
-Patch20:          0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch
-Patch21:          0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
-Patch22:          0022-Issue-4656-remove-problematic-language-from-ds-replc.patch
-Patch23:          0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch
-Patch24:          0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
-Patch25:          0025-Issue-4837-persistent-search-returns-entries-even-wh.patch
-Patch26:          0026-Hardcode-gost-crypt-passsword-storage-scheme.patch
-Patch27:          0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch
-Patch28:          0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch
-Patch29:          0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
-Patch30:          0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch
 
+Patch01:          0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch
+Patch02:          0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch
+Patch03:          0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch
+Patch04:          0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch
+Patch05:          0005-Issue-4092-systemd-tmpfiles-warnings.patch
+Patch06:          0006-Issue-4973-installer-changes-permissions-on-run.patch
+Patch07:          0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch
+Patch08:          0008-Issue-4978-make-installer-robust.patch
+Patch09:          0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
+Patch10:          0010-Issue-4997-Function-declaration-compiler-error-on-1..patch
+Patch11:          0011-Issue-4978-use-more-portable-python-command-for-chec.patch
+Patch12:          0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch
+Patch13:          0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch
+Patch14:          0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch
+Patch15:          0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch
+Patch16:          0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch
+Patch17:          0017-Issue-4775-Fix-cherry-pick-error.patch
 
 %description
 389 Directory Server is an LDAPv3 compliant server.  The base package includes
@@ -695,6 +693,7 @@ exit 0
 %{_sbindir}/ns-slapd
 %{_mandir}/man8/ns-slapd.8.gz
 %{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
+%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
 %{_mandir}/man5/99user.ldif.5.gz
 %{_mandir}/man5/certmap.conf.5.gz
 %{_mandir}/man5/slapd-collations.conf.5.gz
@@ -886,63 +885,42 @@ exit 0
 %doc README.md
 
 %changelog
-* Thu Aug 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-10
-- Bump version to 1.4.3.23-10
-- Resolves: Bug 1997138 - LDAP server crashes when dnaInterval attribute is set to 0
-
-* Wed Aug 25 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-9
-- Bump version to 1.4.3.23-9
-- Resolves: Bug 1947044 - remove unsupported GOST password storage scheme
-
-* Thu Aug 19 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-8
-- Bump version to 1.4.3.23-8
-- Resolves: Bug 1947044 - add missing patch for import result code
-- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute
-
-* Mon Jul 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-7
-- Bump version to 1.4.3.23-7
-- Resolves: Bug 1983921 - persistent search returns entries even when an error is returned by content-sync-plugin
-
-* Fri Jul 16 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-6
-- Bump version to 1.4.3.23-6
-- Resolves: Bug 1982787 - CRYPT password hash with asterisk allows any bind attempt to succeed
-
-* Thu Jul 15 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-5
-- Bump version to 1.4.3.23-5
-- Resolves: Bug 1951020 - Internal unindexed searches in syncrepl
-- Resolves: Bug 1978279 - ds-replcheck state output message has 'Master' instead of 'Supplier'
-
-* Tue Jun 29 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-4
-- Bump version to 1.4.3.23-4
-- Resolves: Bug 1976906 - Instance crash at restart after changelog configuration
-- Resolves: Bug 1480323 - ns-slapd crash at startup - Segmentation fault in strcmpi_fast() when the Referential Integrity log is manually edited
-- Resolves: Bug 1967596 - Temporary password - add CLI and fix compiler errors
-
-* Thu Jun 17 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-3
-- Bump version to 1.4.3.23-3
-- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute
-- Resolves: Bug 1967839 - ACIs are being evaluated against the Replication Manager account in a replication context
-- Resolves: Bug 1970259 - A connection can be erroneously flagged as replication conn during evaluation of an aci with ip bind rule
-- Resolves: Bug 1972590 - Large updates can reset the CLcache to the beginning of the changelog
-- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)
-
-* Sun May 30 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-2
-- Bump version to 1.4.3.23-2
-- Resolves: Bug 1812286 - RFE - Monitor the current DB locks ( nsslapd-db-current-locks )
-- Resolves: Bug 1748441 - RFE - Schedule execution of "compactdb" at specific date/time
-- Resolves: Bug 1938239 - RFE - Extend DNA plugin to support intervals sizes for subuids
-
-* Fri May 14 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-1
-- Bump version to 1.4.3.23-1
-- Resolves: Bug 1947044 - Rebase 389 DS with 389-ds-base-1.4.3.23 for RHEL 8.5
-- Resolves: Bug 1850664 - RFE - Add an option for the Retro Changelog to ignore some attributes
-- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)
-- Resolves: Bug 1898541 - Changelog cache can upload updates from a wrong starting point (CSN)
-- Resolves: Bug 1889562 - client psearch with multiple threads hangs if nsslapd-maxthreadsperconn is under sized
-- Resolves: Bug 1924848 - Negative wtime on ldapcompare
-- Resolves: Bug 1895460 - RFE - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value
-- Resolves: Bug 1897614 - Performance search rate: change entry cache monitor to recursive pthread mutex
-- Resolves: Bug 1939607 - hang because of incorrect accounting of readers in vattr rwlock
-- Resolves: Bug 1626633 - [RFE] DS - Update the password policy to support a Temporary Password with expiration
-- Resolves: Bug 1952804 - CVE-2021-3514 389-ds:1.4/389-ds-base: sync_repl NULL pointer dereference in sync_create_state_control()
+* Thu Feb 3 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-6
+- Bump version to 1.4.3.28-6
+- Resolves: Bug 2047171 - Based on 1944494 (RFC 4530 entryUUID attribute) - plugin entryuuid failing
+
+* Fri Jan 28 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-5
+- Bump version to 1.4.3.28-5
+- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server (part 2)
+
+* Tue Jan 25 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-4
+- Bump version to 1.4.3.28-4
+- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server
+
+* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-3
+- Bump version to 1.4.3.28-3
+- Resolves: Bug 2030367 - EMBARGOED CVE-2021-4091 389-ds:1.4/389-ds-base: double-free of the virtual attribute context in persistent search
+- Resolves: Bug 2033398 - PBKDF2 hashing does not work in FIPS mode
+
+* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-2
+- Bump version to 1.4.3.28-2
+- Resolves: Bug 2024695 - DB corruption "_entryrdn_insert_key - Same DN (dn: nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff,<SUFFIX>) is already in the entryrdn file"
+- Resolves: Bug 1859210 - systemd-tmpfiles warnings
+- Resolves: Bug 1913199 - IPA server (389ds) is very slow in execution of some searches (`&(memberOf=...)(objectClass=ipaHost)` in particular)
+- Resolves: Bug 1974236 - automatique disable of virtual attribute checking
+- Resolves: Bug 1976882 - logconv.pl -j: Use of uninitialized value $first in numeric gt (>)
+- Resolves: Bug 1981281 - ipa user-add fails with "gecos: value invalid per syntax: Invalid syntax"
+- Resolves: Bug 2015998 - Log the Auto Member invalid regex rules in the LDAP errors log
+
+* Thu Oct 21 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-1
+- Bump version to 1.4.3.28-1
+- Resolves: Bug 2016014 - rebase RHEL 8.6 with 389-ds-base-1.4.3
+- Resolves: Bug 1990002 - monitor displays wrong date for connection
+- Resolves: Bug 1950335 - upgrade password hash on bind also causes passwordExpirationtime to be updated
+- Resolves: Bug 1916292 - Indexing a single backend actually processes all configured backends
+- Resolves: Bug 1780842 - [RFE] set db home directory to /dev/shm by default
+- Resolves: Bug 2000975 - Retro Changelog does not trim changes
+
+
+