From be9751a677a11621a6972b21f33ccd289a5e873d Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Mar 30 2021 14:14:00 +0000 Subject: import 389-ds-base-1.4.3.16-11.module+el8.4.0+9969+312e177c --- diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata index af43295..9c5f2b7 100644 --- a/.389-ds-base.metadata +++ b/.389-ds-base.metadata @@ -1,2 +1,2 @@ -7e651c99e43265c678c98ac2d8e31b8c48522be6 SOURCES/389-ds-base-1.4.3.8.tar.bz2 +90cda7aea8d8644eea5a2af28c72350dd915db34 SOURCES/389-ds-base-1.4.3.16.tar.bz2 9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2 diff --git a/.gitignore b/.gitignore index 470a59e..9745926 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -SOURCES/389-ds-base-1.4.3.8.tar.bz2 +SOURCES/389-ds-base-1.4.3.16.tar.bz2 SOURCES/jemalloc-5.2.1.tar.bz2 diff --git a/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch b/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch new file mode 100644 index 0000000..1b08b52 --- /dev/null +++ b/SOURCES/0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch @@ -0,0 +1,159 @@ +From 81dcaf1c37c2de24c46672df8d4f968c2fb40a6e Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 11 Nov 2020 08:59:18 -0500 +Subject: [PATCH 1/3] Issue 4383 - Do not normalize escaped spaces in a DN + +Bug Description: Adding an entry with an escaped leading space leads to many + problems. Mainly id2entry can get corrupted during an + import of such an entry, and the entryrdn index is not + updated correctly + +Fix Description: In slapi_dn_normalize_ext() leave an escaped space intact. + +Relates: https://github.com/389ds/389-ds-base/issues/4383 + +Reviewed by: firstyear, progier, and tbordaz (Thanks!!!) +--- + .../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++- + ldap/servers/slapd/dn.c | 8 +- + 2 files changed, 77 insertions(+), 6 deletions(-) + +diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py +index 543718689..7939a99a7 100644 +--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py ++++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -7,13 +7,12 @@ + # --- END COPYRIGHT BLOCK --- + + import ldap +-import logging + import pytest + import os + from lib389.schema import Schema + from lib389.config import Config + from lib389.idm.user import UserAccounts +-from lib389.idm.group import Groups ++from lib389.idm.group import Group, Groups + from lib389._constants import DEFAULT_SUFFIX + from lib389.topologies import log, topology_st as topo + +@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo): + 4. Success + """ + +- # Create group ++ # Create group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': ' test'}) + +@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo): + groups.list() + + ++@pytest.mark.parametrize("props, rawdn", [ ++ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"), ++ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")]) ++def test_dn_syntax_spaces_delete(topo, props, rawdn): ++ """Test that an entry with a space as the first character in the DN can be ++ deleted without error. 
We also want to make sure the indexes are properly ++ updated by repeatedly adding and deleting the entry, and that the entry cache ++ is properly maintained. ++ ++ :id: b993f37c-c2b0-4312-992c-a9048ff98965 ++ :parametrized: yes ++ :setup: Standalone Instance ++ :steps: ++ 1. Create a group with a DN that has a space as the first/last ++ character. ++ 2. Delete group ++ 3. Add group ++ 4. Modify group ++ 5. Restart server and modify entry ++ 6. Delete group ++ 7. Add group back ++ 8. Delete group using specific DN ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Success ++ """ ++ ++ # Create group ++ groups = Groups(topo.standalone, DEFAULT_SUFFIX) ++ group = groups.create(properties=props.copy()) ++ ++ # Delete group (verifies DN/RDN parsing works and cache is correct) ++ group.delete() ++ ++ # Add group again (verifies entryrdn index was properly updated) ++ groups = Groups(topo.standalone, DEFAULT_SUFFIX) ++ group = groups.create(properties=props.copy()) ++ ++ # Modify the group (verifies dn/rdn parsing is correct) ++ group.replace('description', 'escaped space group') ++ ++ # Restart the server. This will pull the entry from the database and ++ # convert it into a cache entry, which is different than how a client ++ # first adds an entry and is put into the cache before being written to ++ # disk. ++ topo.standalone.restart() ++ ++ # Make sure we can modify the entry (verifies cache entry was created ++ # correctly) ++ group.replace('description', 'escaped space group after restart') ++ ++ # Make sure it can still be deleted (verifies cache again). ++ group.delete() ++ ++ # Add it back so we can delete it using a specific DN (sanity test to verify ++ # another DN/RDN parsing variation). 
++ groups = Groups(topo.standalone, DEFAULT_SUFFIX) ++ group = groups.create(properties=props.copy()) ++ group = Group(topo.standalone, dn=rawdn) ++ group.delete() ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c +index 2af3f38fc..3980b897f 100644 +--- a/ldap/servers/slapd/dn.c ++++ b/ldap/servers/slapd/dn.c +@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) + s++; + } + } +- } else if (s + 2 < ends && +- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) { ++ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) { + /* esc hexpair ==> real character */ + int n = slapi_hexchar2int(*(s + 1)); + int n2 = slapi_hexchar2int(*(s + 2)); +@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len) + if (n == 0) { /* don't change \00 */ + *d++ = *++s; + *d++ = *++s; ++ } else if (n == 32) { /* leave \20 (space) intact */ ++ *d++ = *s; ++ *d++ = *++s; ++ *d++ = *++s; ++ s++; + } else { + *d++ = n; + s += 3; +-- +2.26.2 + diff --git a/SOURCES/0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch b/SOURCES/0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch deleted file mode 100644 index cba92a9..0000000 --- a/SOURCES/0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 97ecf0190f264a2d87750bc2d26ebf011542e3e1 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 8 May 2020 10:52:43 -0400 -Subject: [PATCH 01/12] Issue 51076 - prevent unnecessarily duplication of the - target entry - -Bug Description: For any update operation the MEP plugin was calling - slapi_search_internal_get_entry() which duplicates - the entry it returns. In this case the entry is just - read from and discarded, but this entry is already - in the pblock (the PRE OP ENTRY). - -Fix Description: Just grab the PRE OP ENTRY from the pblock and use - that to read the attribute values from. This saves - two entry duplications for every update operation - from MEP. - -fixes: https://pagure.io/389-ds-base/issue/51076 - -Reviewed by: tbordaz & firstyear(Thanks!!) ---- - ldap/servers/plugins/mep/mep.c | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c -index ca9a64b3b..401d95e3a 100644 ---- a/ldap/servers/plugins/mep/mep.c -+++ b/ldap/servers/plugins/mep/mep.c -@@ -2165,9 +2165,8 @@ mep_pre_op(Slapi_PBlock *pb, int modop) - if (e && free_entry) { - slapi_entry_free(e); - } -- -- slapi_search_internal_get_entry(sdn, 0, &e, mep_get_plugin_id()); -- free_entry = 1; -+ slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e); -+ free_entry = 0; - } - - if (e && mep_is_managed_entry(e)) { --- -2.26.2 - diff --git a/SOURCES/0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch b/SOURCES/0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch deleted file mode 100644 index 822c8d2..0000000 --- a/SOURCES/0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 1426f086623404ab2eacb04de7e6414177c0993a Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Mon, 11 May 2020 17:11:49 +0200 -Subject: [PATCH 02/12] Ticket 51082 - abort when a empty valueset is freed - -Bug Description: - A large valueset (more than 10 values) manages a sorted array of values. - replication purges old values from a valueset (valueset_array_purge). 
If it purges all the values - the valueset is freed (slapi_valueset_done). - A problem is that the counter of values, in the valueset, is still reflecting the initial number - of values (before the purge). When the valueset is freed (because empty) a safety checking - detects incoherent values based on the wrong counter. - -Fix Description: - When all the values have been purge reset the counter before freeing the valueset - -https://pagure.io/389-ds-base/issue/51082 - -Reviewed by: Mark Reynolds - -Platforms tested: F30 - -Flag Day: no - -Doc impact: no ---- - .../suites/replication/acceptance_test.py | 57 +++++++++++++++++++ - ldap/servers/slapd/valueset.c | 4 ++ - 2 files changed, 61 insertions(+) - -diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py -index c8e0a4c93..5009f4e7c 100644 ---- a/dirsrvtests/tests/suites/replication/acceptance_test.py -+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py -@@ -500,6 +500,63 @@ def test_warining_for_invalid_replica(topo_m4): - assert topo_m4.ms["master1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*') - - -+@pytest.mark.ds51082 -+def test_csnpurge_large_valueset(topo_m2): -+ """Test csn generator test -+ -+ :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74 -+ :setup: MMR with 2 masters -+ :steps: -+ 1. Create a test_user -+ 2. add a large set of values (more than 10) -+ 3. delete all the values (more than 10) -+ 4. configure the replica to purge those values (purgedelay=5s) -+ 5. Waiting for 6 second -+ 6. do a series of update -+ :expectedresults: -+ 1. Should succeeds -+ 2. Should succeeds -+ 3. Should succeeds -+ 4. Should succeeds -+ 5. Should succeeds -+ 6. Should not crash -+ """ -+ m1 = topo_m2.ms["master2"] -+ -+ test_user = UserAccount(m1, TEST_ENTRY_DN) -+ if test_user.exists(): -+ log.info('Deleting entry {}'.format(TEST_ENTRY_DN)) -+ test_user.delete() -+ test_user.create(properties={ -+ 'uid': TEST_ENTRY_NAME, -+ 'cn': TEST_ENTRY_NAME, -+ 'sn': TEST_ENTRY_NAME, -+ 'userPassword': TEST_ENTRY_NAME, -+ 'uidNumber' : '1000', -+ 'gidNumber' : '2000', -+ 'homeDirectory' : '/home/mmrepl_test', -+ }) -+ -+ # create a large value set so that it is sorted -+ for i in range(1,20): -+ test_user.add('description', 'value {}'.format(str(i))) -+ -+ # delete all values of the valueset -+ for i in range(1,20): -+ test_user.remove('description', 'value {}'.format(str(i))) -+ -+ # set purging delay to 5 second and wait more that 5second -+ replicas = Replicas(m1) -+ replica = replicas.list()[0] -+ log.info('nsds5ReplicaPurgeDelay to 5') -+ replica.set('nsds5ReplicaPurgeDelay', '5') -+ time.sleep(6) -+ -+ # add some new values to the valueset containing entries that should be purged -+ for i in range(21,25): -+ test_user.add('description', 'value {}'.format(str(i))) -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c -index 2af3ee18d..12027ecb8 100644 ---- a/ldap/servers/slapd/valueset.c -+++ b/ldap/servers/slapd/valueset.c -@@ -801,6 +801,10 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn) - } - } - } else { -+ /* empty valueset - reset the vs->num so that further -+ * checking will not abort -+ */ -+ vs->num = 0; - slapi_valueset_done(vs); - } - --- -2.26.2 - diff --git a/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch b/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch new file mode 
100644 index 0000000..e82fdf8 --- /dev/null +++ b/SOURCES/0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch @@ -0,0 +1,232 @@ +From 29c9e1c3c760f0941b022d45d14c248e9ceb9738 Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Tue, 3 Nov 2020 12:18:50 +0100 +Subject: [PATCH 2/3] ticket 2058: Add keep alive entry after on-line + initialization - second version (#4399) + +Bug description: +Keep alive entry is not created on target master after on line initialization, +and its RUVelement stays empty until a direct update is issued on that master + +Fix description: +The patch allows a consumer (configured as a master) to create (if it did not +exist before) the consumer's keep alive entry. It creates it at the end of a +replication session at a time we are sure the changelog exists and will not +be reset. It allows a consumer to have RUVelement with csn in the RUV at the +first incoming replication session. + +That is basically lkrispen's proposal with an associated pytest testcase + +Second version changes: + - moved the testcase to suites/replication/regression_test.py + - set up the topology from a 2 master topology then + reinitialized the replicas from an ldif without replication metadata + rather than using the cli. + - search for keepalive entries using search_s instead of getEntry + - add a comment about keep alive entries purpose + +last commit: + - wait that ruv are in sync before checking keep alive entries + +Reviewed by: droideck, Firstyear + +Platforms tested: F32 + +relates: #2058 +--- + .../suites/replication/regression_test.py | 130 ++++++++++++++++++ + .../plugins/replication/repl5_replica.c | 14 ++ + ldap/servers/plugins/replication/repl_extop.c | 4 + + 3 files changed, 148 insertions(+) + +diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py +index 844d762b9..14b9d6a44 100644 +--- a/dirsrvtests/tests/suites/replication/regression_test.py ++++ b/dirsrvtests/tests/suites/replication/regression_test.py +@@ -98,6 +98,30 @@ def _move_ruv(ldif_file): + for dn, entry in ldif_list: + ldif_writer.unparse(dn, entry) + ++def _remove_replication_data(ldif_file): ++ """ Remove the replication data from ldif file: ++ db2lif without -r includes some of the replica data like ++ - nsUniqueId ++ - keepalive entries ++ This function filters the ldif fil to remove these data ++ """ ++ ++ with open(ldif_file) as f: ++ parser = ldif.LDIFRecordList(f) ++ parser.parse() ++ ++ ldif_list = parser.all_records ++ # Iterate on a copy of the ldif entry list ++ for dn, entry in ldif_list[:]: ++ if dn.startswith('cn=repl keep alive'): ++ ldif_list.remove((dn,entry)) ++ else: ++ entry.pop('nsUniqueId') ++ with open(ldif_file, 'w') as f: ++ ldif_writer = ldif.LDIFWriter(f) ++ for dn, entry in ldif_list: ++ ldif_writer.unparse(dn, entry) ++ + + @pytest.fixture(scope="module") + def topo_with_sigkill(request): +@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2): + assert len(m1entries) == len(m2entries) + + ++def get_keepalive_entries(instance,replica): ++ # Returns the keep alive entries that exists with the suffix of the server instance ++ try: ++ entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL, ++ "(&(objectclass=ldapsubentry)(cn=repl keep alive*))", ++ ['cn', 'nsUniqueId', 'modifierTimestamp']) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, 
str(e))) ++ assert False ++ # No error, so lets log the keepalive entries ++ if log.isEnabledFor(logging.DEBUG): ++ for ret in entries: ++ log.debug("Found keepalive entry:\n"+str(ret)); ++ return entries ++ ++def verify_keepalive_entries(topo, expected): ++ #Check that keep alive entries exists (or not exists) for every masters on every masters ++ #Note: The testing method is quite basic: counting that there is one keepalive entry per master. ++ # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but ++ # not for the general case as keep alive associated with no more existing master may exists ++ # (for example after: db2ldif / demote a master / ldif2db / init other masters) ++ # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries ++ # should be done. ++ for masterId in topo.ms: ++ master=topo.ms[masterId] ++ for replica in Replicas(master).list(): ++ if (replica.get_role() != ReplicaRole.MASTER): ++ continue ++ replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}' ++ log.debug(f'Checking keepAliveEntries on {replica_info}') ++ keepaliveEntries = get_keepalive_entries(master, replica); ++ expectedCount = len(topo.ms) if expected else 0 ++ foundCount = len(keepaliveEntries) ++ if (foundCount == expectedCount): ++ log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.') ++ else: ++ log.error(f'{foundCount} Keepalive entries are found ' ++ f'while {expectedCount} were expected on {replica_info}.') ++ assert False ++ ++ ++def test_online_init_should_create_keepalive_entries(topo_m2): ++ """Check that keep alive entries are created when initializinf a master from another one ++ ++ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe ++ :setup: Two masters replication setup ++ :steps: ++ 1. Generate ldif without replication data ++ 2 Init both masters from that ldif ++ 3 Check that keep alive entries does not exists ++ 4 Perform on line init of master2 from master1 ++ 5 Check that keep alive entries exists ++ :expectedresults: ++ 1. No error while generating ldif ++ 2. No error while importing the ldif file ++ 3. No keepalive entrie should exists on any masters ++ 4. No error while initializing master2 ++ 5. 
All keepalive entries should exist on every masters ++ ++ """ ++ ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ m1 = topo_m2.ms["master1"] ++ m2 = topo_m2.ms["master2"] ++ # Step 1: Generate ldif without replication data ++ m1.stop() ++ m2.stop() ++ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() ++ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], ++ excludeSuffixes=None, repl_data=False, ++ outputfile=ldif_file, encrypt=False) ++ # Remove replication metadata that are still in the ldif ++ _remove_replication_data(ldif_file) ++ ++ # Step 2: Init both masters from that ldif ++ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) ++ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) ++ m1.start() ++ m2.start() ++ ++ """ Replica state is now as if CLI setup has been done using: ++ dsconf master1 replication enable --suffix "${SUFFIX}" --role master ++ dsconf master2 replication enable --suffix "${SUFFIX}" --role master ++ dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" ++ dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" ++ dsconf master1 repl-agmt create --suffix "${SUFFIX}" ++ dsconf master2 repl-agmt create --suffix "${SUFFIX}" ++ """ ++ ++ # Step 3: No keepalive entrie should exists on any masters ++ verify_keepalive_entries(topo_m2, False) ++ ++ # Step 4: Perform on line init of master2 from master1 ++ agmt = Agreements(m1).list()[0] ++ agmt.begin_reinit() ++ (done, error) = agmt.wait_reinit() ++ assert done is True ++ assert error is False ++ ++ # Step 5: All keepalive entries should exists on every masters ++ # Verify the keep alive entry once replication is in sync ++ # (that is the step that fails when bug is not fixed) ++ repl.wait_for_ruv(m2,m1) ++ verify_keepalive_entries(topo_m2, True); ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index f01782330..f0ea0f8ef 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -373,6 +373,20 @@ replica_destroy(void **arg) + slapi_ch_free((void **)arg); + } + ++/****************************************************************************** ++ ******************** REPLICATION KEEP ALIVE ENTRIES ************************** ++ ****************************************************************************** ++ * They are subentries of the replicated suffix and there is one per master. * ++ * These entries exist only to trigger a change that get replicated over the * ++ * topology. * ++ * Their main purpose is to generate records in the changelog and they are * ++ * updated from time to time by fractional replication to insure that at * ++ * least a change must be replicated by FR after a great number of not * ++ * replicated changes are found in the changelog. 
The interest is that the * ++ * fractional RUV get then updated so less changes need to be walked in the * ++ * changelog when searching for the first change to send * ++ ******************************************************************************/ ++ + #define KEEP_ALIVE_ATTR "keepalivetimestamp" + #define KEEP_ALIVE_ENTRY "repl keep alive" + #define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s" +diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c +index 14c8e0bcc..af486f730 100644 +--- a/ldap/servers/plugins/replication/repl_extop.c ++++ b/ldap/servers/plugins/replication/repl_extop.c +@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb) + */ + if (cl5GetState() == CL5_STATE_OPEN) { + replica_log_ruv_elements(r); ++ /* now that the changelog is open and started, we can alos cretae the ++ * keep alive entry without risk that db and cl will not match ++ */ ++ replica_subentry_check(replica_get_root(r), replica_get_rid(r)); + } + + /* ONREPL code that dealt with new RUV, etc was moved into the code +-- +2.26.2 + diff --git a/SOURCES/0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch b/SOURCES/0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch deleted file mode 100644 index b3a1a82..0000000 --- a/SOURCES/0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 7a62e72b81d75ebb844835619ecc97dbf5e21058 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 14 May 2020 09:38:20 -0400 -Subject: [PATCH 03/12] Issue 51091 - healthcheck json report fails when - mapping tree is deleted - -Description: We were passing the bename in bytes and not as a utf8 string. - This caused the json dumping to fail. - -relates: https://pagure.io/389-ds-base/issue/51091 - -Reviewed by: firstyear(Thanks!) ---- - src/lib389/lib389/backend.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py -index e472d3de5..4f752f414 100644 ---- a/src/lib389/lib389/backend.py -+++ b/src/lib389/lib389/backend.py -@@ -11,7 +11,7 @@ import copy - import ldap - from lib389._constants import * - from lib389.properties import * --from lib389.utils import normalizeDN, ensure_str, ensure_bytes, assert_c -+from lib389.utils import normalizeDN, ensure_str, assert_c - from lib389 import Entry - - # Need to fix this .... -@@ -488,10 +488,10 @@ class Backend(DSLdapObject): - - # Check for the missing mapping tree. 
- suffix = self.get_attr_val_utf8('nsslapd-suffix') -- bename = self.get_attr_val_bytes('cn') -+ bename = self.get_attr_val_utf8('cn') - try: - mt = self._mts.get(suffix) -- if mt.get_attr_val_bytes('nsslapd-backend') != bename and mt.get_attr_val('nsslapd-state') != ensure_bytes('backend'): -+ if mt.get_attr_val_utf8('nsslapd-backend') != bename and mt.get_attr_val_utf8('nsslapd-state') != 'backend': - raise ldap.NO_SUCH_OBJECT("We have a matching suffix, but not a backend or correct database name.") - except ldap.NO_SUCH_OBJECT: - result = DSBLE0001 --- -2.26.2 - diff --git a/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch b/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch new file mode 100644 index 0000000..411958e --- /dev/null +++ b/SOURCES/0003-do-not-add-referrals-for-masters-with-different-data.patch @@ -0,0 +1,513 @@ +From e202c62c3b4c92163d2de9f3da9a9f3efc81e4b8 Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Thu, 12 Nov 2020 18:50:04 +0100 +Subject: [PATCH 3/3] do not add referrals for masters with different data + generation #2054 (#4427) + +Bug description: +The problem is that some operation mandatory in the usual cases are +also performed when replication cannot take place because the +database set are differents (i.e: RUV generation ids are different) + +One of the issue is that the csn generator state is updated when +starting a replication session (it is a problem when trying to +reset the time skew, as freshly reinstalled replicas get infected +by the old ones) + +A second issue is that the RUV got updated when ending a replication session +(which may add replica that does not share the same data set, +then update operations on consumer retun referrals towards wrong masters + +Fix description: +The fix checks the RUVs generation id before updating the csn generator +and before updating the RUV. 
+ +Reviewed by: mreynolds + firstyear + vashirov + +Platforms tested: F32 +--- + .../suites/replication/regression_test.py | 290 ++++++++++++++++++ + ldap/servers/plugins/replication/repl5.h | 1 + + .../plugins/replication/repl5_inc_protocol.c | 20 +- + .../plugins/replication/repl5_replica.c | 39 ++- + src/lib389/lib389/dseldif.py | 37 +++ + 5 files changed, 368 insertions(+), 19 deletions(-) + +diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py +index 14b9d6a44..a72af6b30 100644 +--- a/dirsrvtests/tests/suites/replication/regression_test.py ++++ b/dirsrvtests/tests/suites/replication/regression_test.py +@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts + from lib389.pwpolicy import PwPolicyManager + from lib389.utils import * + from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2 ++from lib389.topologies import topology_m2c2 as topo_m2c2 + from lib389._constants import * + from lib389.idm.organizationalunit import OrganizationalUnits + from lib389.idm.user import UserAccount +@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager + from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager + from lib389.agreement import Agreements + from lib389 import pid_from_file ++from lib389.dseldif import * + + + pytestmark = pytest.mark.tier1 +@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2): + verify_keepalive_entries(topo_m2, True); + + ++def get_agreement(agmts, consumer): ++ # Get agreement towards consumer among the agremment list ++ for agmt in agmts.list(): ++ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and ++ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host): ++ return agmt ++ return None; ++ ++ ++def test_ruv_url_not_added_if_different_uuid(topo_m2c2): ++ """Check that RUV url is not updated if RUV generation uuid are different ++ ++ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344 ++ :setup: Two masters + two consumers replication setup ++ :steps: ++ 1. Generate ldif without replication data ++ 2. Init both masters from that ldif ++ (to clear the ruvs and generates different generation uuid) ++ 3. Perform on line init from master1 to consumer1 ++ and from master2 to consumer2 ++ 4. Perform update on both masters ++ 5. Check that c1 RUV does not contains URL towards m2 ++ 6. Check that c2 RUV does contains URL towards m2 ++ 7. Perform on line init from master1 to master2 ++ 8. Perform update on master2 ++ 9. Check that c1 RUV does contains URL towards m2 ++ :expectedresults: ++ 1. No error while generating ldif ++ 2. No error while importing the ldif file ++ 3. No error and Initialization done. ++ 4. No error ++ 5. master2 replicaid should not be in the consumer1 RUV ++ 6. master2 replicaid should be in the consumer2 RUV ++ 7. No error and Initialization done. ++ 8. No error ++ 9. 
master2 replicaid should be in the consumer1 RUV ++ ++ """ ++ ++ # Variables initialization ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ ++ m1 = topo_m2c2.ms["master1"] ++ m2 = topo_m2c2.ms["master2"] ++ c1 = topo_m2c2.cs["consumer1"] ++ c2 = topo_m2c2.cs["consumer2"] ++ ++ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) ++ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) ++ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) ++ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) ++ ++ replicid_m2 = replica_m2.get_rid() ++ ++ agmts_m1 = Agreements(m1, replica_m1.dn) ++ agmts_m2 = Agreements(m2, replica_m2.dn) ++ ++ m1_m2 = get_agreement(agmts_m1, m2) ++ m1_c1 = get_agreement(agmts_m1, c1) ++ m1_c2 = get_agreement(agmts_m1, c2) ++ m2_m1 = get_agreement(agmts_m2, m1) ++ m2_c1 = get_agreement(agmts_m2, c1) ++ m2_c2 = get_agreement(agmts_m2, c2) ++ ++ # Step 1: Generate ldif without replication data ++ m1.stop() ++ m2.stop() ++ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() ++ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], ++ excludeSuffixes=None, repl_data=False, ++ outputfile=ldif_file, encrypt=False) ++ # Remove replication metadata that are still in the ldif ++ # _remove_replication_data(ldif_file) ++ ++ # Step 2: Init both masters from that ldif ++ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) ++ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) ++ m1.start() ++ m2.start() ++ ++ # Step 3: Perform on line init from master1 to consumer1 ++ # and from master2 to consumer2 ++ m1_c1.begin_reinit() ++ m2_c2.begin_reinit() ++ (done, error) = m1_c1.wait_reinit() ++ assert done is True ++ assert error is False ++ (done, error) = m2_c2.wait_reinit() ++ assert done is True ++ assert error is False ++ ++ # Step 4: Perform update on both masters ++ repl.test_replication(m1, c1) ++ repl.test_replication(m2, c2) ++ ++ # Step 5: Check that c1 RUV does not contains URL towards m2 ++ ruv = replica_c1.get_ruv() ++ log.debug(f"c1 RUV: {ruv}") ++ url=ruv._rid_url.get(replica_m2.get_rid()) ++ if (url == None): ++ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV"); ++ else: ++ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); ++ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV") ++ #Note: this assertion fails if issue 2054 is not fixed. 
++ assert False ++ ++ # Step 6: Check that c2 RUV does contains URL towards m2 ++ ruv = replica_c2.get_ruv() ++ log.debug(f"c1 RUV: {ruv} {ruv._rids} ") ++ url=ruv._rid_url.get(replica_m2.get_rid()) ++ if (url == None): ++ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV"); ++ assert False ++ else: ++ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); ++ ++ ++ # Step 7: Perform on line init from master1 to master2 ++ m1_m2.begin_reinit() ++ (done, error) = m1_m2.wait_reinit() ++ assert done is True ++ assert error is False ++ ++ # Step 8: Perform update on master2 ++ repl.test_replication(m2, c1) ++ ++ # Step 9: Check that c1 RUV does contains URL towards m2 ++ ruv = replica_c1.get_ruv() ++ log.debug(f"c1 RUV: {ruv} {ruv._rids} ") ++ url=ruv._rid_url.get(replica_m2.get_rid()) ++ if (url == None): ++ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV"); ++ assert False ++ else: ++ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}"); ++ ++ ++def test_csngen_state_not_updated_if_different_uuid(topo_m2c2): ++ """Check that csngen remote offset is not updated if RUV generation uuid are different ++ ++ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5 ++ :setup: Two masters + two consumers replication setup ++ :steps: ++ 1. Disable m1<->m2 agreement to avoid propagate timeSkew ++ 2. Generate ldif without replication data ++ 3. Increase time skew on master2 ++ 4. Init both masters from that ldif ++ (to clear the ruvs and generates different generation uuid) ++ 5. Perform on line init from master1 to consumer1 and master2 to consumer2 ++ 6. Perform update on both masters ++ 7: Check that c1 has no time skew ++ 8: Check that c2 has time skew ++ 9. Init master2 from master1 ++ 10. Perform update on master2 ++ 11. Check that c1 has time skew ++ :expectedresults: ++ 1. No error ++ 2. No error while generating ldif ++ 3. No error ++ 4. No error while importing the ldif file ++ 5. No error and Initialization done. ++ 6. No error ++ 7. c1 time skew should be lesser than threshold ++ 8. c2 time skew should be higher than threshold ++ 9. No error and Initialization done. ++ 10. No error ++ 11. 
c1 time skew should be higher than threshold ++ ++ """ ++ ++ # Variables initialization ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ ++ m1 = topo_m2c2.ms["master1"] ++ m2 = topo_m2c2.ms["master2"] ++ c1 = topo_m2c2.cs["consumer1"] ++ c2 = topo_m2c2.cs["consumer2"] ++ ++ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) ++ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) ++ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) ++ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) ++ ++ replicid_m2 = replica_m2.get_rid() ++ ++ agmts_m1 = Agreements(m1, replica_m1.dn) ++ agmts_m2 = Agreements(m2, replica_m2.dn) ++ ++ m1_m2 = get_agreement(agmts_m1, m2) ++ m1_c1 = get_agreement(agmts_m1, c1) ++ m1_c2 = get_agreement(agmts_m1, c2) ++ m2_m1 = get_agreement(agmts_m2, m1) ++ m2_c1 = get_agreement(agmts_m2, c1) ++ m2_c2 = get_agreement(agmts_m2, c2) ++ ++ # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew ++ m1_m2.pause() ++ m2_m1.pause() ++ ++ # Step 2: Generate ldif without replication data ++ m1.stop() ++ m2.stop() ++ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() ++ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], ++ excludeSuffixes=None, repl_data=False, ++ outputfile=ldif_file, encrypt=False) ++ # Remove replication metadata that are still in the ldif ++ # _remove_replication_data(ldif_file) ++ ++ # Step 3: Increase time skew on master2 ++ timeSkew=6*3600 ++ # We can modify master2 time skew ++ # But the time skew on the consumer may be smaller ++ # depending on when the cnsgen generation time is updated ++ # and when first csn get replicated. ++ # Since we use timeSkew has threshold value to detect ++ # whether there are time skew or not, ++ # lets add a significative margin (longer than the test duration) ++ # to avoid any risk of erroneous failure ++ timeSkewMargin = 300 ++ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin) ++ ++ # Step 4: Init both masters from that ldif ++ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) ++ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) ++ m1.start() ++ m2.start() ++ ++ # Step 5: Perform on line init from master1 to consumer1 ++ # and from master2 to consumer2 ++ m1_c1.begin_reinit() ++ m2_c2.begin_reinit() ++ (done, error) = m1_c1.wait_reinit() ++ assert done is True ++ assert error is False ++ (done, error) = m2_c2.wait_reinit() ++ assert done is True ++ assert error is False ++ ++ # Step 6: Perform update on both masters ++ repl.test_replication(m1, c1) ++ repl.test_replication(m2, c2) ++ ++ # Step 7: Check that c1 has no time skew ++ # Stop server to insure that dse.ldif is uptodate ++ c1.stop() ++ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] ++ c1_timeSkew = int(c1_nsState['time_skew']) ++ log.debug(f"c1 time skew: {c1_timeSkew}") ++ if (c1_timeSkew >= timeSkew): ++ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}") ++ assert False ++ c1.start() ++ ++ # Step 8: Check that c2 has time skew ++ # Stop server to insure that dse.ldif is uptodate ++ c2.stop() ++ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0] ++ c2_timeSkew = int(c2_nsState['time_skew']) ++ log.debug(f"c2 time skew: {c2_timeSkew}") ++ if (c2_timeSkew < timeSkew): ++ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}") ++ assert False ++ c2.start() ++ ++ # Step 9: Perform on line init from master1 to master2 ++ m1_c1.pause() ++ m1_m2.resume() ++ m1_m2.begin_reinit() ++ (done, error) = m1_m2.wait_reinit() ++ assert done is True ++ assert error 
is False ++ ++ # Step 10: Perform update on master2 ++ repl.test_replication(m2, c1) ++ ++ # Step 11: Check that c1 has time skew ++ # Stop server to insure that dse.ldif is uptodate ++ c1.stop() ++ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] ++ c1_timeSkew = int(c1_nsState['time_skew']) ++ log.debug(f"c1 time skew: {c1_timeSkew}") ++ if (c1_timeSkew < timeSkew): ++ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}") ++ assert False ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h +index b35f724c2..f1c596a3f 100644 +--- a/ldap/servers/plugins/replication/repl5.h ++++ b/ldap/servers/plugins/replication/repl5.h +@@ -708,6 +708,7 @@ void replica_dump(Replica *r); + void replica_set_enabled(Replica *r, PRBool enable); + Replica *replica_get_replica_from_dn(const Slapi_DN *dn); + Replica *replica_get_replica_from_root(const char *repl_root); ++int replica_check_generation(Replica *r, const RUV *remote_ruv); + int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl); + Replica *replica_get_replica_for_op(Slapi_PBlock *pb); + /* the functions below manipulate replica hash */ +diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c +index 29b1fb073..af5e5897c 100644 +--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c ++++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c +@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv) + } else if (NULL == remote_ruv) { + return_value = EXAMINE_RUV_PRISTINE_REPLICA; + } else { +- char *local_gen = NULL; +- char *remote_gen = ruv_get_replica_generation(remote_ruv); +- Object *local_ruv_obj; +- RUV *local_ruv; +- + PR_ASSERT(NULL != prp->replica); +- local_ruv_obj = replica_get_ruv(prp->replica); +- if (NULL != local_ruv_obj) { +- local_ruv = (RUV *)object_get_data(local_ruv_obj); +- PR_ASSERT(local_ruv); +- local_gen = ruv_get_replica_generation(local_ruv); +- object_release(local_ruv_obj); +- } +- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) { +- return_value = EXAMINE_RUV_GENERATION_MISMATCH; +- } else { ++ if (replica_check_generation(prp->replica, remote_ruv)) { + return_value = EXAMINE_RUV_OK; ++ } else { ++ return_value = EXAMINE_RUV_GENERATION_MISMATCH; + } +- slapi_ch_free((void **)&remote_gen); +- slapi_ch_free((void **)&local_gen); + } + return return_value; + } +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index f0ea0f8ef..7e56d6557 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv) + replica_unlock(r->repl_lock); + } + ++/* ++ * Check if replica generation is the same than the remote ruv one ++ */ ++int ++replica_check_generation(Replica *r, const RUV *remote_ruv) ++{ ++ int return_value; ++ char *local_gen = NULL; ++ char *remote_gen = ruv_get_replica_generation(remote_ruv); ++ Object *local_ruv_obj; ++ RUV *local_ruv; ++ ++ PR_ASSERT(NULL != r); ++ local_ruv_obj = replica_get_ruv(r); ++ if (NULL != local_ruv_obj) { ++ local_ruv = (RUV *)object_get_data(local_ruv_obj); ++ PR_ASSERT(local_ruv); ++ local_gen = ruv_get_replica_generation(local_ruv); ++ object_release(local_ruv_obj); ++ } ++ if (NULL == 
remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) { ++ return_value = PR_FALSE; ++ } else { ++ return_value = PR_TRUE; ++ } ++ slapi_ch_free_string(&remote_gen); ++ slapi_ch_free_string(&local_gen); ++ return return_value; ++} ++ + /* + * Update one particular CSN in an RUV. This is meant to be called + * whenever (a) the server has processed a client operation and +@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn) + + PR_ASSERT(r && ruv); + ++ if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */ ++ { ++ return 0; ++ } ++ + rc = ruv_get_max_csn(ruv, &csn); + if (rc != RUV_SUCCESS) { + return -1; +@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv) + replica_lock(r->repl_lock); + + local_ruv = (RUV *)object_get_data(r->repl_ruv); +- +- if (is_cleaned_rid(supplier_id) || local_ruv == NULL) { ++ if (is_cleaned_rid(supplier_id) || local_ruv == NULL || ++ !replica_check_generation(r, supplier_ruv)) { + replica_unlock(r->repl_lock); + return; + } +diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py +index 10baba4d7..6850c9a8a 100644 +--- a/src/lib389/lib389/dseldif.py ++++ b/src/lib389/lib389/dseldif.py +@@ -317,6 +317,43 @@ class DSEldif(DSLint): + + return states + ++ def _increaseTimeSkew(self, suffix, timeSkew): ++ # Increase csngen state local_offset by timeSkew ++ # Warning: instance must be stopped before calling this function ++ assert (timeSkew >= 0) ++ nsState = self.readNsState(suffix)[0] ++ self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}') ++ oldNsState = self.get(nsState['dn'], 'nsState', True) ++ self._instance.log.debug(f'oldNsState is {oldNsState}') ++ ++ # Lets reencode the new nsState ++ from lib389.utils import print_nice_time ++ if pack('h', 1) == pack('=h',1): ++ end = '>' ++ else: ++ raise ValueError("Unknown endian, unable to proceed") ++ ++ thelen = len(oldNsState) ++ if thelen <= 20: ++ pad = 2 # padding for short H values ++ timefmt = 'I' # timevals are unsigned 32-bit int ++ else: ++ pad = 6 # padding for short H values ++ timefmt = 'Q' # timevals are unsigned 64-bit int ++ fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad) ++ newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']), ++ int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew, ++ int(nsState['remote_offset']), int(nsState['seq_num']))) ++ newNsState = newNsState.decode('utf-8') ++ self._instance.log.debug(f'newNsState is {newNsState}') ++ # Lets replace the value. ++ (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState') ++ attr_i = next(iter(attr_data)) ++ self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}" ++ self._update() ++ + + class FSChecks(DSLint): + """This is for the healthcheck feature, check commonly used system config files the +-- +2.26.2 + diff --git a/SOURCES/0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch b/SOURCES/0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch deleted file mode 100644 index c931a79..0000000 --- a/SOURCES/0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch +++ /dev/null @@ -1,943 +0,0 @@ -From f13d630ff98eb5b5505f1db3e7f207175b51b237 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 12 May 2020 13:48:30 -0400 -Subject: [PATCH 04/12] Issue 51076 - remove unnecessary slapi entry dups - -Description: So the problem is that slapi_search_internal_get_entry() - duplicates the entry twice. 
It does that as a convenience - where it will allocate a pblock, do the search, copy - the entry, free search results from the pblock, and then - free the pblock itself. I basically split this function - into two functions. One function allocates the pblock, - does the search and returns the entry. The other function - frees the entries and pblock. - - 99% of time when we call slapi_search_internal_get_entry() - we are just reading it and freeing it. It's not being - consumed. In these cases we can use the two function - approach eliminates an extra slapi_entry_dup(). Over the - time of an operation/connection we can save quite a bit - of mallocing/freeing. This could also help with memory - fragmentation. - -ASAN: passed - -relates: https://pagure.io/389-ds-base/issue/51076 - -Reviewed by: firstyear & tbordaz(Thanks!) ---- - ldap/servers/plugins/acctpolicy/acct_config.c | 6 +-- - ldap/servers/plugins/acctpolicy/acct_plugin.c | 36 +++++++------- - ldap/servers/plugins/acctpolicy/acct_util.c | 6 +-- - ldap/servers/plugins/automember/automember.c | 17 +++---- - ldap/servers/plugins/dna/dna.c | 23 ++++----- - ldap/servers/plugins/memberof/memberof.c | 16 +++---- - .../plugins/pam_passthru/pam_ptconfig.c | 10 ++-- - .../servers/plugins/pam_passthru/pam_ptimpl.c | 7 +-- - .../plugins/pam_passthru/pam_ptpreop.c | 9 ++-- - .../plugins/replication/repl5_tot_protocol.c | 5 +- - ldap/servers/plugins/uiduniq/uid.c | 23 ++++----- - ldap/servers/slapd/daemon.c | 11 ++--- - ldap/servers/slapd/modify.c | 12 +++-- - ldap/servers/slapd/plugin_internal_op.c | 48 +++++++++++++++++++ - ldap/servers/slapd/resourcelimit.c | 13 ++--- - ldap/servers/slapd/schema.c | 7 ++- - ldap/servers/slapd/slapi-plugin.h | 23 ++++++++- - 17 files changed, 161 insertions(+), 111 deletions(-) - -diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c -index fe35ba5a0..01e4f319f 100644 ---- a/ldap/servers/plugins/acctpolicy/acct_config.c -+++ b/ldap/servers/plugins/acctpolicy/acct_config.c -@@ -37,6 +37,7 @@ static int acct_policy_entry2config(Slapi_Entry *e, - int - acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *plugin_id) - { -+ Slapi_PBlock *entry_pb = NULL; - acctPluginCfg *newcfg; - Slapi_Entry *config_entry = NULL; - Slapi_DN *config_sdn = NULL; -@@ -44,8 +45,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void * - - /* Retrieve the config entry */ - config_sdn = slapi_sdn_new_normdn_byref(PLUGIN_CONFIG_DN); -- rc = slapi_search_internal_get_entry(config_sdn, NULL, &config_entry, -- plugin_id); -+ rc = slapi_search_get_entry(&entry_pb, config_sdn, NULL, &config_entry, plugin_id); - slapi_sdn_free(&config_sdn); - - if (rc != LDAP_SUCCESS || config_entry == NULL) { -@@ -60,7 +60,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void * - rc = acct_policy_entry2config(config_entry, newcfg); - config_unlock(); - -- slapi_entry_free(config_entry); -+ slapi_search_get_entry_done(&entry_pb); - - return (rc); - } -diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c -index 2a876ad72..c3c32b074 100644 ---- a/ldap/servers/plugins/acctpolicy/acct_plugin.c -+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c -@@ -209,6 +209,7 @@ done: - int - acct_bind_preop(Slapi_PBlock *pb) - { -+ Slapi_PBlock *entry_pb = NULL; - const char *dn = NULL; - Slapi_DN *sdn = NULL; - Slapi_Entry *target_entry = NULL; -@@ -236,8 +237,7 @@ 
acct_bind_preop(Slapi_PBlock *pb) - goto done; - } - -- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry, -- plugin_id); -+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id); - - /* There was a problem retrieving the entry */ - if (ldrc != LDAP_SUCCESS) { -@@ -275,7 +275,7 @@ done: - slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL); - } - -- slapi_entry_free(target_entry); -+ slapi_search_get_entry_done(&entry_pb); - - free_acctpolicy(&policy); - -@@ -293,6 +293,7 @@ done: - int - acct_bind_postop(Slapi_PBlock *pb) - { -+ Slapi_PBlock *entry_pb = NULL; - char *dn = NULL; - int ldrc, tracklogin = 0; - int rc = 0; /* Optimistic default */ -@@ -327,8 +328,7 @@ acct_bind_postop(Slapi_PBlock *pb) - covered by an account policy to decide whether we should track */ - if (tracklogin == 0) { - sdn = slapi_sdn_new_normdn_byref(dn); -- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry, -- plugin_id); -+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id); - - if (ldrc != LDAP_SUCCESS) { - slapi_log_err(SLAPI_LOG_ERR, POST_PLUGIN_NAME, -@@ -355,7 +355,7 @@ done: - slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL); - } - -- slapi_entry_free(target_entry); -+ slapi_search_get_entry_done(&entry_pb); - - slapi_sdn_free(&sdn); - -@@ -370,11 +370,11 @@ done: - static int - acct_pre_op(Slapi_PBlock *pb, int modop) - { -+ Slapi_PBlock *entry_pb = NULL; - Slapi_DN *sdn = 0; - Slapi_Entry *e = 0; - Slapi_Mods *smods = 0; - LDAPMod **mods; -- int free_entry = 0; - char *errstr = NULL; - int ret = SLAPI_PLUGIN_SUCCESS; - -@@ -384,28 +384,25 @@ acct_pre_op(Slapi_PBlock *pb, int modop) - - if (acct_policy_dn_is_config(sdn)) { - /* Validate config changes, but don't apply them. -- * This allows us to reject invalid config changes -- * here at the pre-op stage. Applying the config -- * needs to be done at the post-op stage. */ -+ * This allows us to reject invalid config changes -+ * here at the pre-op stage. Applying the config -+ * needs to be done at the post-op stage. */ - - if (LDAP_CHANGETYPE_ADD == modop) { - slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e); - -- /* If the entry doesn't exist, just bail and -- * let the server handle it. */ -+ /* If the entry doesn't exist, just bail and let the server handle it. */ - if (e == NULL) { - goto bail; - } - } else if (LDAP_CHANGETYPE_MODIFY == modop) { - /* Fetch the entry being modified so we can -- * create the resulting entry for validation. */ -+ * create the resulting entry for validation. */ - if (sdn) { -- slapi_search_internal_get_entry(sdn, 0, &e, get_identity()); -- free_entry = 1; -+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, get_identity()); - } - -- /* If the entry doesn't exist, just bail and -- * let the server handle it. */ -+ /* If the entry doesn't exist, just bail and let the server handle it. */ - if (e == NULL) { - goto bail; - } -@@ -418,7 +415,7 @@ acct_pre_op(Slapi_PBlock *pb, int modop) - /* Apply the mods to create the resulting entry. */ - if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) { - /* The mods don't apply cleanly, so we just let this op go -- * to let the main server handle it. */ -+ * to let the main server handle it. 
*/ - goto bailmod; - } - } else if (modop == LDAP_CHANGETYPE_DELETE) { -@@ -439,8 +436,7 @@ bailmod: - } - - bail: -- if (free_entry && e) -- slapi_entry_free(e); -+ slapi_search_get_entry_done(&entry_pb); - - if (ret) { - slapi_log_err(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME, -diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c -index f25a3202d..f432092fe 100644 ---- a/ldap/servers/plugins/acctpolicy/acct_util.c -+++ b/ldap/servers/plugins/acctpolicy/acct_util.c -@@ -85,6 +85,7 @@ get_attr_string_val(Slapi_Entry *target_entry, char *attr_name) - int - get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_entry, void *plugin_id, acctPolicy **policy) - { -+ Slapi_PBlock *entry_pb = NULL; - Slapi_DN *sdn = NULL; - Slapi_Entry *policy_entry = NULL; - Slapi_Attr *attr; -@@ -123,8 +124,7 @@ get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_ent - } - - sdn = slapi_sdn_new_dn_byref(policy_dn); -- ldrc = slapi_search_internal_get_entry(sdn, NULL, &policy_entry, -- plugin_id); -+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &policy_entry, plugin_id); - slapi_sdn_free(&sdn); - - /* There should be a policy but it can't be retrieved; fatal error */ -@@ -160,7 +160,7 @@ dopolicy: - done: - config_unlock(); - slapi_ch_free_string(&policy_dn); -- slapi_entry_free(policy_entry); -+ slapi_search_get_entry_done(&entry_pb); - return (rc); - } - -diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c -index 7c875c852..39350ad53 100644 ---- a/ldap/servers/plugins/automember/automember.c -+++ b/ldap/servers/plugins/automember/automember.c -@@ -1629,13 +1629,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char - char *member_value = NULL; - int rc = 0; - Slapi_DN *group_sdn; -- Slapi_Entry *group_entry = NULL; - - /* First thing check that the group still exists */ - group_sdn = slapi_sdn_new_dn_byval(group_dn); -- rc = slapi_search_internal_get_entry(group_sdn, NULL, &group_entry, automember_get_plugin_id()); -+ rc = slapi_search_internal_get_entry(group_sdn, NULL, NULL, automember_get_plugin_id()); - slapi_sdn_free(&group_sdn); -- if (rc != LDAP_SUCCESS || group_entry == NULL) { -+ if (rc != LDAP_SUCCESS) { - if (rc == LDAP_NO_SUCH_OBJECT) { - /* the automember group (default or target) does not exist, just skip this definition */ - slapi_log_err(SLAPI_LOG_INFO, AUTOMEMBER_PLUGIN_SUBSYSTEM, -@@ -1647,10 +1646,8 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char - "automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n", - group_dn, rc); - } -- slapi_entry_free(group_entry); - return rc; - } -- slapi_entry_free(group_entry); - - /* If grouping_value is dn, we need to fetch the dn instead. */ - if (slapi_attr_type_cmp(grouping_value, "dn", SLAPI_TYPE_CMP_EXACT) == 0) { -@@ -1752,11 +1749,11 @@ out: - static int - automember_pre_op(Slapi_PBlock *pb, int modop) - { -+ Slapi_PBlock *entry_pb = NULL; - Slapi_DN *sdn = 0; - Slapi_Entry *e = 0; - Slapi_Mods *smods = 0; - LDAPMod **mods; -- int free_entry = 0; - char *errstr = NULL; - int ret = SLAPI_PLUGIN_SUCCESS; - -@@ -1784,8 +1781,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop) - /* Fetch the entry being modified so we can - * create the resulting entry for validation. 
*/ - if (sdn) { -- slapi_search_internal_get_entry(sdn, 0, &e, automember_get_plugin_id()); -- free_entry = 1; -+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, automember_get_plugin_id()); - } - - /* If the entry doesn't exist, just bail and -@@ -1799,7 +1795,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop) - smods = slapi_mods_new(); - slapi_mods_init_byref(smods, mods); - -- /* Apply the mods to create the resulting entry. */ -+ /* Apply the mods to create the resulting entry. */ - if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) { - /* The mods don't apply cleanly, so we just let this op go - * to let the main server handle it. */ -@@ -1831,8 +1827,7 @@ bailmod: - } - - bail: -- if (free_entry && e) -- slapi_entry_free(e); -+ slapi_search_get_entry_done(&entry_pb); - - if (ret) { - slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, -diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c -index 1ee271359..16c625bb0 100644 ---- a/ldap/servers/plugins/dna/dna.c -+++ b/ldap/servers/plugins/dna/dna.c -@@ -1178,7 +1178,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) - - value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN); - if (value) { -- Slapi_Entry *shared_e = NULL; - Slapi_DN *sdn = NULL; - char *normdn = NULL; - char *attrs[2]; -@@ -1197,10 +1196,8 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) - /* We don't need attributes */ - attrs[0] = "cn"; - attrs[1] = NULL; -- slapi_search_internal_get_entry(sdn, attrs, &shared_e, getPluginID()); -- - /* Make sure that the shared config entry exists. */ -- if (!shared_e) { -+ if(slapi_search_internal_get_entry(sdn, attrs, NULL, getPluginID()) != LDAP_SUCCESS) { - /* We didn't locate the shared config container entry. Log - * a message and skip this config entry. 
*/ - slapi_log_err(SLAPI_LOG_ERR, DNA_PLUGIN_SUBSYSTEM, -@@ -1210,9 +1207,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) - ret = DNA_FAILURE; - slapi_sdn_free(&sdn); - goto bail; -- } else { -- slapi_entry_free(shared_e); -- shared_e = NULL; - } - - normdn = (char *)slapi_sdn_get_dn(sdn); -@@ -1539,6 +1533,7 @@ dna_delete_shared_servers(PRCList **servers) - static int - dna_load_host_port(void) - { -+ Slapi_PBlock *pb = NULL; - int status = DNA_SUCCESS; - Slapi_Entry *e = NULL; - Slapi_DN *config_dn = NULL; -@@ -1554,7 +1549,7 @@ dna_load_host_port(void) - - config_dn = slapi_sdn_new_ndn_byref("cn=config"); - if (config_dn) { -- slapi_search_internal_get_entry(config_dn, attrs, &e, getPluginID()); -+ slapi_search_get_entry(&pb, config_dn, attrs, &e, getPluginID()); - slapi_sdn_free(&config_dn); - } - -@@ -1562,8 +1557,8 @@ dna_load_host_port(void) - hostname = slapi_entry_attr_get_charptr(e, "nsslapd-localhost"); - portnum = slapi_entry_attr_get_charptr(e, "nsslapd-port"); - secureportnum = slapi_entry_attr_get_charptr(e, "nsslapd-secureport"); -- slapi_entry_free(e); - } -+ slapi_search_get_entry_done(&pb); - - if (!hostname || !portnum) { - status = DNA_FAILURE; -@@ -2876,6 +2871,7 @@ bail: - static int - dna_is_replica_bind_dn(char *range_dn, char *bind_dn) - { -+ Slapi_PBlock *entry_pb = NULL; - char *replica_dn = NULL; - Slapi_DN *replica_sdn = NULL; - Slapi_DN *range_sdn = NULL; -@@ -2912,8 +2908,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) - attrs[2] = 0; - - /* Find cn=replica entry via search */ -- slapi_search_internal_get_entry(replica_sdn, attrs, &e, getPluginID()); -- -+ slapi_search_get_entry(&entry_pb, replica_sdn, attrs, &e, getPluginID()); - if (e) { - /* Check if the passed in bind dn matches any of the replica bind dns. 
*/ - Slapi_Value *bind_dn_sv = slapi_value_new_string(bind_dn); -@@ -2927,6 +2922,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) - attrs[0] = "member"; - attrs[1] = "uniquemember"; - attrs[2] = 0; -+ slapi_search_get_entry_done(&entry_pb); - for (i = 0; bind_group_dn != NULL && bind_group_dn[i] != NULL; i++) { - if (ret) { - /* already found a member, just free group */ -@@ -2934,14 +2930,14 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) - continue; - } - bind_group_sdn = slapi_sdn_new_normdn_passin(bind_group_dn[i]); -- slapi_search_internal_get_entry(bind_group_sdn, attrs, &bind_group_entry, getPluginID()); -+ slapi_search_get_entry(&entry_pb, bind_group_sdn, attrs, &bind_group_entry, getPluginID()); - if (bind_group_entry) { - ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "member", bind_dn_sv); - if (ret == 0) { - ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "uniquemember", bind_dn_sv); - } - } -- slapi_entry_free(bind_group_entry); -+ slapi_search_get_entry_done(&entry_pb); - slapi_sdn_free(&bind_group_sdn); - } - slapi_ch_free((void **)&bind_group_dn); -@@ -2956,7 +2952,6 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) - } - - done: -- slapi_entry_free(e); - slapi_sdn_free(&range_sdn); - slapi_sdn_free(&replica_sdn); - -diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c -index 40bd4b380..e9e1ec4c7 100644 ---- a/ldap/servers/plugins/memberof/memberof.c -+++ b/ldap/servers/plugins/memberof/memberof.c -@@ -884,7 +884,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb) - pre_sdn = slapi_entry_get_sdn(pre_e); - post_sdn = slapi_entry_get_sdn(post_e); - } -- -+ - if (pre_sdn && post_sdn && slapi_sdn_compare(pre_sdn, post_sdn) == 0) { - /* Regarding memberof plugin, this rename is a no-op - * but it can be expensive to process it. So skip it -@@ -1466,6 +1466,7 @@ memberof_modop_one_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi - int - memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn, Slapi_DN *replace_with_sdn, Slapi_DN *op_to_sdn, memberofstringll *stack) - { -+ Slapi_PBlock *entry_pb = NULL; - int rc = 0; - LDAPMod mod; - LDAPMod replace_mod; -@@ -1515,8 +1516,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o - } - - /* determine if this is a group op or single entry */ -- slapi_search_internal_get_entry(op_to_sdn, config->groupattrs, -- &e, memberof_get_plugin_id()); -+ slapi_search_get_entry(&entry_pb, op_to_sdn, config->groupattrs, &e, memberof_get_plugin_id()); - if (!e) { - /* In the case of a delete, we need to worry about the - * missing entry being a nested group. 
There's a small -@@ -1751,7 +1751,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o - bail: - slapi_value_free(&to_dn_val); - slapi_value_free(&this_dn_val); -- slapi_entry_free(e); -+ slapi_search_get_entry_done(&entry_pb); - return rc; - } - -@@ -2368,6 +2368,7 @@ bail: - int - memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Value *memberdn) - { -+ Slapi_PBlock *pb = NULL; - int rc = 0; - Slapi_DN *sdn = 0; - Slapi_Entry *group_e = 0; -@@ -2376,8 +2377,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va - - sdn = slapi_sdn_new_normdn_byref(slapi_value_get_string(groupdn)); - -- slapi_search_internal_get_entry(sdn, config->groupattrs, -- &group_e, memberof_get_plugin_id()); -+ slapi_search_get_entry(&pb, sdn, config->groupattrs, -+ &group_e, memberof_get_plugin_id()); - - if (group_e) { - /* See if memberdn is referred to by any of the group attributes. */ -@@ -2388,9 +2389,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va - break; - } - } -- -- slapi_entry_free(group_e); - } -+ slapi_search_get_entry_done(&pb); - - slapi_sdn_free(&sdn); - return rc; -diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c -index 46a76d884..cbec2ec40 100644 ---- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c -+++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c -@@ -749,22 +749,22 @@ pam_passthru_get_config(Slapi_DN *bind_sdn) - if (pam_passthru_check_suffix(cfg, bind_sdn) == LDAP_SUCCESS) { - if (cfg->slapi_filter) { - /* A filter is configured, so see if the bind entry is a match. */ -+ Slapi_PBlock *entry_pb = NULL; - Slapi_Entry *test_e = NULL; - - /* Fetch the bind entry */ -- slapi_search_internal_get_entry(bind_sdn, NULL, &test_e, -- pam_passthruauth_get_plugin_identity()); -+ slapi_search_get_entry(&entry_pb, bind_sdn, NULL, &test_e, -+ pam_passthruauth_get_plugin_identity()); - - /* If the entry doesn't exist, just fall through to the main server code */ - if (test_e) { - /* Evaluate the filter. */ - if (LDAP_SUCCESS == slapi_filter_test_simple(test_e, cfg->slapi_filter)) { - /* This is a match. */ -- slapi_entry_free(test_e); -+ slapi_search_get_entry_done(&entry_pb); - goto done; - } -- -- slapi_entry_free(test_e); -+ slapi_search_get_entry_done(&entry_pb); - } - } else { - /* There is no filter to check, so this is a match. 
*/ -diff --git a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c -index 7f5fb02c4..5b43f8d1f 100644 ---- a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c -+++ b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c -@@ -81,11 +81,12 @@ derive_from_bind_dn(Slapi_PBlock *pb __attribute__((unused)), const Slapi_DN *bi - static char * - derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_id, char *map_ident_attr, int *locked) - { -+ Slapi_PBlock *entry_pb = NULL; - Slapi_Entry *entry = NULL; - char *attrs[] = {NULL, NULL}; - attrs[0] = map_ident_attr; -- int rc = slapi_search_internal_get_entry((Slapi_DN *)bindsdn, attrs, &entry, -- pam_passthruauth_get_plugin_identity()); -+ int32_t rc = slapi_search_get_entry(&entry_pb, (Slapi_DN *)bindsdn, attrs, &entry, -+ pam_passthruauth_get_plugin_identity()); - - if (rc != LDAP_SUCCESS) { - slapi_log_err(SLAPI_LOG_ERR, PAM_PASSTHRU_PLUGIN_SUBSYSTEM, -@@ -108,7 +109,7 @@ derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_ - init_my_str_buf(pam_id, val); - } - -- slapi_entry_free(entry); -+ slapi_search_get_entry_done(&entry_pb); - - return pam_id->str; - } -diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c -index 3d0067531..5bca823ff 100644 ---- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c -+++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c -@@ -526,6 +526,7 @@ done: - static int - pam_passthru_preop(Slapi_PBlock *pb, int modtype) - { -+ Slapi_PBlock *entry_pb = NULL; - Slapi_DN *sdn = NULL; - Slapi_Entry *e = NULL; - LDAPMod **mods; -@@ -555,8 +556,8 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype) - case LDAP_CHANGETYPE_MODIFY: - /* Fetch the entry being modified so we can - * create the resulting entry for validation. */ -- slapi_search_internal_get_entry(sdn, 0, &e, -- pam_passthruauth_get_plugin_identity()); -+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, -+ pam_passthruauth_get_plugin_identity()); - - /* If the entry doesn't exist, just bail and - * let the server handle it. */ -@@ -576,9 +577,6 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype) - /* Don't bail here, as we need to free the entry. */ - } - } -- -- /* Free the entry. */ -- slapi_entry_free(e); - break; - case LDAP_CHANGETYPE_DELETE: - case LDAP_CHANGETYPE_MODDN: -@@ -591,6 +589,7 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype) - } - - bail: -+ slapi_search_get_entry_done(&entry_pb); - /* If we are refusing the operation, return the result to the client. 
*/ - if (ret) { - slapi_send_ldap_result(pb, ret, NULL, returntext, 0, NULL); -diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c -index 3b65d6b20..a25839f21 100644 ---- a/ldap/servers/plugins/replication/repl5_tot_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c -@@ -469,7 +469,8 @@ retry: - */ - /* Get suffix */ - Slapi_Entry *suffix = NULL; -- rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION)); -+ Slapi_PBlock *suffix_pb = NULL; -+ rc = slapi_search_get_entry(&suffix_pb, area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION)); - if (rc) { - slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "repl5_tot_run - Unable to " - "get the suffix entry \"%s\".\n", -@@ -517,7 +518,7 @@ retry: - LDAP_SCOPE_SUBTREE, "(parentid>=1)", NULL, 0, ctrls, NULL, - repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), OP_FLAG_BULK_IMPORT); - cb_data.num_entries = 0UL; -- slapi_entry_free(suffix); -+ slapi_search_get_entry_done(&suffix_pb); - } else { - /* Original total update */ - /* we need to provide managedsait control so that referral entries can -diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c -index d7ccf0e07..e69012204 100644 ---- a/ldap/servers/plugins/uiduniq/uid.c -+++ b/ldap/servers/plugins/uiduniq/uid.c -@@ -1254,6 +1254,7 @@ preop_modify(Slapi_PBlock *pb) - static int - preop_modrdn(Slapi_PBlock *pb) - { -+ Slapi_PBlock *entry_pb = NULL; - int result = LDAP_SUCCESS; - Slapi_Entry *e = NULL; - Slapi_Value *sv_requiredObjectClass = NULL; -@@ -1351,7 +1352,7 @@ preop_modrdn(Slapi_PBlock *pb) - - /* Get the entry that is being renamed so we can make a dummy copy - * of what it will look like after the rename. */ -- err = slapi_search_internal_get_entry(sdn, NULL, &e, plugin_identity); -+ err = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_identity); - if (err != LDAP_SUCCESS) { - result = uid_op_error(35); - /* We want to return a no such object error if the target doesn't exist. */ -@@ -1371,24 +1372,24 @@ preop_modrdn(Slapi_PBlock *pb) - - - /* -- * Check if it has the required object class -- */ -+ * Check if it has the required object class -+ */ - if (requiredObjectClass && - !slapi_entry_attr_has_syntax_value(e, SLAPI_ATTR_OBJECTCLASS, sv_requiredObjectClass)) { - break; - } - - /* -- * Find any unique attribute data in the new RDN -- */ -+ * Find any unique attribute data in the new RDN -+ */ - for (i = 0; attrNames && attrNames[i]; i++) { - err = slapi_entry_attr_find(e, attrNames[i], &attr); - if (!err) { - /* -- * Passed all the requirements - this is an operation we -- * need to enforce uniqueness on. Now find all parent entries -- * with the marker object class, and do a search for each one. -- */ -+ * Passed all the requirements - this is an operation we -+ * need to enforce uniqueness on. Now find all parent entries -+ * with the marker object class, and do a search for each one. 
-+ */ - if (NULL != markerObjectClass) { - /* Subtree defined by location of marker object class */ - result = findSubtreeAndSearch(slapi_entry_get_sdn(e), attrNames, attr, NULL, -@@ -1407,8 +1408,8 @@ preop_modrdn(Slapi_PBlock *pb) - END - /* Clean-up */ - slapi_value_free(&sv_requiredObjectClass); -- if (e) -- slapi_entry_free(e); -+ -+ slapi_search_get_entry_done(&entry_pb); - - if (result) { - slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 65f23363a..a70f40316 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -1916,18 +1916,13 @@ slapd_bind_local_user(Connection *conn) - char *root_dn = config_get_ldapi_root_dn(); - - if (root_dn) { -+ Slapi_PBlock *entry_pb = NULL; - Slapi_DN *edn = slapi_sdn_new_dn_byref( - slapi_dn_normalize(root_dn)); - Slapi_Entry *e = 0; - - /* root might be locked too! :) */ -- ret = slapi_search_internal_get_entry( -- edn, 0, -- &e, -- (void *)plugin_get_default_component_id() -- -- ); -- -+ ret = slapi_search_get_entry(&entry_pb, edn, 0, &e, (void *)plugin_get_default_component_id()); - if (0 == ret && e) { - ret = slapi_check_account_lock( - 0, /* pb not req */ -@@ -1955,7 +1950,7 @@ slapd_bind_local_user(Connection *conn) - root_map_free: - /* root_dn consumed by bind creds set */ - slapi_sdn_free(&edn); -- slapi_entry_free(e); -+ slapi_search_get_entry_done(&entry_pb); - ret = 0; - } - } -diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c -index bbc0ab71a..259bedfff 100644 ---- a/ldap/servers/slapd/modify.c -+++ b/ldap/servers/slapd/modify.c -@@ -592,6 +592,7 @@ modify_internal_pb(Slapi_PBlock *pb) - static void - op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw) - { -+ Slapi_PBlock *entry_pb = NULL; - Slapi_Backend *be = NULL; - Slapi_Entry *pse; - Slapi_Entry *referral; -@@ -723,7 +724,7 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw) - * 2. If yes, then if the mods contain any passwdpolicy specific attributes. - * 3. If yes, then it invokes corrosponding checking function. - */ -- if (!repl_op && !internal_op && normdn && (e = get_entry(pb, normdn))) { -+ if (!repl_op && !internal_op && normdn && slapi_search_get_entry(&entry_pb, sdn, NULL, &e, NULL) == LDAP_SUCCESS) { - Slapi_Value target; - slapi_value_init(&target); - slapi_value_set_string(&target, "passwordpolicy"); -@@ -1072,7 +1073,7 @@ free_and_return : { - slapi_entry_free(epre); - slapi_entry_free(epost); - } -- slapi_entry_free(e); -+ slapi_search_get_entry_done(&entry_pb); - - if (be) - slapi_be_Unlock(be); -@@ -1202,12 +1203,13 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M - if (!internal_op) { - /* slapi_acl_check_mods needs an array of LDAPMods, but - * we're really only interested in the one password mod. */ -+ Slapi_PBlock *entry_pb = NULL; - LDAPMod *mods[2]; - mods[0] = mod; - mods[1] = NULL; - - /* We need to actually fetch the target here to use for ACI checking. */ -- slapi_search_internal_get_entry(&sdn, NULL, &e, (void *)plugin_get_default_component_id()); -+ slapi_search_get_entry(&entry_pb, &sdn, NULL, &e, NULL); - - /* Create a bogus entry with just the target dn if we were unable to - * find the actual entry. This will only be used for checking the ACIs. 
*/ -@@ -1238,9 +1240,12 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M - } - send_ldap_result(pb, res, NULL, errtxt, 0, NULL); - slapi_ch_free_string(&errtxt); -+ slapi_search_get_entry_done(&entry_pb); - rc = -1; - goto done; - } -+ /* done with slapi entry e */ -+ slapi_search_get_entry_done(&entry_pb); - - /* - * If this mod is being performed by a password administrator/rootDN, -@@ -1353,7 +1358,6 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M - valuearray_free(&values); - - done: -- slapi_entry_free(e); - slapi_sdn_done(&sdn); - slapi_ch_free_string(&proxydn); - slapi_ch_free_string(&proxystr); -diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c -index 9da266b61..a140e7988 100644 ---- a/ldap/servers/slapd/plugin_internal_op.c -+++ b/ldap/servers/slapd/plugin_internal_op.c -@@ -882,3 +882,51 @@ slapi_search_internal_get_entry(Slapi_DN *dn, char **attrs, Slapi_Entry **ret_en - int_search_pb = NULL; - return rc; - } -+ -+int32_t -+slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity) -+{ -+ Slapi_Entry **entries = NULL; -+ int32_t rc = 0; -+ void *component = component_identity; -+ -+ if (ret_entry) { -+ *ret_entry = NULL; -+ } -+ -+ if (component == NULL) { -+ component = (void *)plugin_get_default_component_id(); -+ } -+ -+ if (*pb == NULL) { -+ *pb = slapi_pblock_new(); -+ } -+ slapi_search_internal_set_pb(*pb, slapi_sdn_get_dn(dn), LDAP_SCOPE_BASE, -+ "(|(objectclass=*)(objectclass=ldapsubentry))", -+ attrs, 0, NULL, NULL, component, 0 ); -+ slapi_search_internal_pb(*pb); -+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); -+ if (LDAP_SUCCESS == rc) { -+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries); -+ if (NULL != entries && NULL != entries[0]) { -+ /* Only need to dup the entry if the caller passed ret_entry in. 
*/ -+ if (ret_entry) { -+ *ret_entry = entries[0]; -+ } -+ } else { -+ rc = LDAP_NO_SUCH_OBJECT; -+ } -+ } -+ -+ return rc; -+} -+ -+void -+slapi_search_get_entry_done(Slapi_PBlock **pb) -+{ -+ if (pb && *pb) { -+ slapi_free_search_results_internal(*pb); -+ slapi_pblock_destroy(*pb); -+ *pb = NULL; -+ } -+} -diff --git a/ldap/servers/slapd/resourcelimit.c b/ldap/servers/slapd/resourcelimit.c -index 705344c84..9c2619716 100644 ---- a/ldap/servers/slapd/resourcelimit.c -+++ b/ldap/servers/slapd/resourcelimit.c -@@ -305,22 +305,17 @@ reslimit_get_ext(Slapi_Connection *conn, const char *logname, SLAPIResLimitConnD - int - reslimit_update_from_dn(Slapi_Connection *conn, Slapi_DN *dn) - { -- Slapi_Entry *e; -+ Slapi_PBlock *pb = NULL; -+ Slapi_Entry *e = NULL; - int rc; - -- e = NULL; - if (dn != NULL) { -- - char **attrs = reslimit_get_registered_attributes(); -- (void)slapi_search_internal_get_entry(dn, attrs, &e, reslimit_componentid); -+ slapi_search_get_entry(&pb, dn, attrs, &e, reslimit_componentid); - charray_free(attrs); - } -- - rc = reslimit_update_from_entry(conn, e); -- -- if (NULL != e) { -- slapi_entry_free(e); -- } -+ slapi_search_get_entry_done(&pb); - - return (rc); - } -diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c -index d44b03b0e..bf7e59f75 100644 ---- a/ldap/servers/slapd/schema.c -+++ b/ldap/servers/slapd/schema.c -@@ -341,6 +341,7 @@ schema_policy_add_action(Slapi_Entry *entry, char *attrName, schema_item_t **lis - static void - schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica) - { -+ Slapi_PBlock *pb = NULL; - Slapi_DN sdn; - Slapi_Entry *entry = NULL; - schema_item_t *schema_item, *next; -@@ -369,8 +370,7 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica) - - /* Load the replication policy of the schema */ - slapi_sdn_init_dn_byref(&sdn, dn); -- if (slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) { -- -+ if (slapi_search_get_entry(&pb, &sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) { - /* fill the policies (accept/reject) regarding objectclass */ - schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_ACCEPT, &replica->objectclasses); - schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_REJECT, &replica->objectclasses); -@@ -378,9 +378,8 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica) - /* fill the policies (accept/reject) regarding attribute */ - schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_ACCEPT, &replica->attributes); - schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_REJECT, &replica->attributes); -- -- slapi_entry_free(entry); - } -+ slapi_search_get_entry_done(&pb); - slapi_sdn_done(&sdn); - } - -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index 0e3857068..be1e52e4d 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -5972,7 +5972,7 @@ void slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *ibase, int type, char *at - - /* - * slapi_search_internal_get_entry() finds an entry given a dn. It returns -- * an LDAP error code (LDAP_SUCCESS if all goes well). -+ * an LDAP error code (LDAP_SUCCESS if all goes well). 
Caller must free ret_entry - */ - int slapi_search_internal_get_entry(Slapi_DN *dn, char **attrlist, Slapi_Entry **ret_entry, void *caller_identity); - -@@ -8296,6 +8296,27 @@ uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder); - /* helper function */ - const char * slapi_fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val); - -+/** -+ * Get a Slapi_Entry via an internal search. The caller then needs to call -+ * slapi_get_entry_done() to free any resources allocated to get the entry -+ * -+ * \param pb - slapi_pblock pointer (the function will allocate if necessary) -+ * \param dn - Slapi_DN of the entry to retrieve -+ * \param attrs - char list of attributes to get -+ * \param ret_entry - pointer to a Slapi_entry wer the returned entry is stored -+ * \param component_identity - plugin component -+ * -+ * \return - ldap result code -+ */ -+int32_t slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity); -+ -+/** -+ * Free the resources allocated by slapi_search_get_entry() -+ * -+ * \param pb - slapi_pblock pointer -+ */ -+void slapi_search_get_entry_done(Slapi_PBlock **pb); -+ - #ifdef __cplusplus - } - #endif --- -2.26.2 - diff --git a/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch b/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch new file mode 100644 index 0000000..5622a1a --- /dev/null +++ b/SOURCES/0004-Ticket-50933-Update-2307compat.ldif.patch @@ -0,0 +1,179 @@ +From 826a1bb4ea88915ac492828d1cc4a901623f7866 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Thu, 14 May 2020 14:31:47 +1000 +Subject: [PATCH 1/2] Ticket 50933 - Update 2307compat.ldif + +Bug Description: This resolves a potential conflict between 60nis.ldif +in freeipa and others with 2307compat, by removing the conflicting +definitions from 2307bis that were included. + +Fix Description: By not including these in 2307compat, this means that +sites that rely on the values provided by 2307bis may ALSO need +60nis.ldif to be present. However, these nis values seem like they are +likely very rare in reality, and this also will avoid potential +issues with freeipa. It also is the least disruptive as we don't need +to change an already defined file, and we don't have values where the name +to oid relationship changes. + +Fixes: #50933 +https://pagure.io/389-ds-base/issue/50933 + +Author: William Brown + +Review by: tbordaz (Thanks!) 
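A usage note on the slapi_search_get_entry() / slapi_search_get_entry_done() helpers introduced by the preceding patch: callers hand in a NULL Slapi_PBlock pointer, receive a borrowed entry out of that pblock's search results, and release everything with a single call instead of freeing the entry themselves. The sketch below is illustrative only and is not part of any patch in this series; the caller name example_lookup() and its attribute list are invented for the example, and passing NULL as the component identity relies on the fallback to the server's default component id shown in the helper's implementation above.

    #include "slapi-plugin.h"

    /*
     * Minimal sketch of the intended calling pattern.  The entry returned
     * through "e" is borrowed from the pblock's internal search results, so
     * it must NOT be freed with slapi_entry_free(); it is released when
     * slapi_search_get_entry_done() frees the pblock.
     */
    static int32_t
    example_lookup(Slapi_DN *sdn)      /* hypothetical caller, for illustration */
    {
        Slapi_PBlock *entry_pb = NULL; /* allocated by the helper on first use */
        Slapi_Entry *e = NULL;         /* borrowed pointer, do not free */
        char *attrs[] = { "cn", NULL };

        int32_t rc = slapi_search_get_entry(&entry_pb, sdn, attrs, &e, NULL);
        if (rc == LDAP_SUCCESS && e) {
            /* read attributes from e here */
        }
        /* frees the internal search results and the pblock, then NULLs it */
        slapi_search_get_entry_done(&entry_pb);
        return rc;
    }

The same pattern is what the hunks above apply across the automember, dna, memberof, pam_passthru, uiduniq and core server call sites.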
+--- + ldap/schema/10rfc2307compat.ldif | 66 -------------------------------- + ldap/schema/60autofs.ldif | 39 ++++++++++++------- + 2 files changed, 26 insertions(+), 79 deletions(-) + +diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif +index 8810231ac..78c588d08 100644 +--- a/ldap/schema/10rfc2307compat.ldif ++++ b/ldap/schema/10rfc2307compat.ldif +@@ -176,50 +176,6 @@ attributeTypes: ( + SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 + SINGLE-VALUE + ) +-attributeTypes: ( +- 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' +- DESC 'NIS public key' +- EQUALITY octetStringMatch +- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 +- SINGLE-VALUE +- ) +-attributeTypes: ( +- 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' +- DESC 'NIS secret key' +- EQUALITY octetStringMatch +- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 +- SINGLE-VALUE +- ) +-attributeTypes: ( +- 1.3.6.1.1.1.1.30 NAME 'nisDomain' +- DESC 'NIS domain' +- EQUALITY caseIgnoreIA5Match +- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 +- ) +-attributeTypes: ( +- 1.3.6.1.1.1.1.31 NAME 'automountMapName' +- DESC 'automount Map Name' +- EQUALITY caseExactIA5Match +- SUBSTR caseExactIA5SubstringsMatch +- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 +- SINGLE-VALUE +- ) +-attributeTypes: ( +- 1.3.6.1.1.1.1.32 NAME 'automountKey' +- DESC 'Automount Key value' +- EQUALITY caseExactIA5Match +- SUBSTR caseExactIA5SubstringsMatch +- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 +- SINGLE-VALUE +- ) +-attributeTypes: ( +- 1.3.6.1.1.1.1.33 NAME 'automountInformation' +- DESC 'Automount information' +- EQUALITY caseExactIA5Match +- SUBSTR caseExactIA5SubstringsMatch +- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 +- SINGLE-VALUE +- ) + # end of attribute types - beginning of objectclasses + objectClasses: ( + 1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY +@@ -324,28 +280,6 @@ objectClasses: ( + seeAlso $ serialNumber' + MAY ( bootFile $ bootParameter $ cn $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) + ) +-objectClasses: ( +- 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY +- DESC 'An object with a public and secret key' +- MUST ( cn $ nisPublicKey $ nisSecretKey ) +- MAY ( uidNumber $ description ) +- ) +-objectClasses: ( +- 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY +- DESC 'Associates a NIS domain with a naming context' +- MUST nisDomain +- ) +-objectClasses: ( +- 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL +- MUST ( automountMapName ) +- MAY description +- ) +-objectClasses: ( +- 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL +- DESC 'Automount information' +- MUST ( automountKey $ automountInformation ) +- MAY description +- ) + ## namedObject is needed for groups without members + objectClasses: ( + 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL +diff --git a/ldap/schema/60autofs.ldif b/ldap/schema/60autofs.ldif +index 084e9ec30..de3922aa2 100644 +--- a/ldap/schema/60autofs.ldif ++++ b/ldap/schema/60autofs.ldif +@@ -6,7 +6,23 @@ dn: cn=schema + ################################################################################ + # + attributeTypes: ( +- 1.3.6.1.1.1.1.33 ++ 1.3.6.1.1.1.1.31 NAME 'automountMapName' ++ DESC 'automount Map Name' ++ EQUALITY caseExactIA5Match ++ SUBSTR caseExactIA5SubstringsMatch ++ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ++ SINGLE-VALUE ++ ) ++attributeTypes: ( ++ 1.3.6.1.1.1.1.32 NAME 'automountKey' ++ DESC 'Automount Key value' ++ EQUALITY caseExactIA5Match ++ SUBSTR caseExactIA5SubstringsMatch ++ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ++ SINGLE-VALUE ++ ) ++attributeTypes: ( ++ 
1.3.6.1.1.1.1.33 + NAME 'automountInformation' + DESC 'Information used by the autofs automounter' + EQUALITY caseExactIA5Match +@@ -18,25 +34,22 @@ attributeTypes: ( + ################################################################################ + # + objectClasses: ( +- 1.3.6.1.1.1.2.17 +- NAME 'automount' +- DESC 'An entry in an automounter map' ++ 1.3.6.1.1.1.2.16 ++ NAME 'automountMap' ++ DESC 'An group of related automount objects' + SUP top + STRUCTURAL +- MUST ( cn $ automountInformation ) +- MAY ( description ) ++ MAY ( ou $ automountMapName $ description ) + X-ORIGIN 'draft-howard-rfc2307bis' + ) +-# +-################################################################################ +-# + objectClasses: ( +- 1.3.6.1.1.1.2.16 +- NAME 'automountMap' +- DESC 'An group of related automount objects' ++ 1.3.6.1.1.1.2.17 ++ NAME 'automount' ++ DESC 'An entry in an automounter map' + SUP top + STRUCTURAL +- MUST ( ou ) ++ MUST ( automountInformation ) ++ MAY ( cn $ description $ automountKey ) + X-ORIGIN 'draft-howard-rfc2307bis' + ) + # +-- +2.26.2 + diff --git a/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch b/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch new file mode 100644 index 0000000..82fdf9d --- /dev/null +++ b/SOURCES/0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch @@ -0,0 +1,36 @@ +From 3d9ced9e340678cc02b1a36c2139492c95ef15a6 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 12 Aug 2020 12:46:42 -0400 +Subject: [PATCH 2/2] Issue 50933 - Fix OID change between 10rfc2307 and + 10rfc2307compat + +Bug Description: 10rfc2307compat changed the OID for nisMap objectclass to + match the standard OID, but this breaks replication with + older versions of DS. + +Fix Description: Continue to use the old(invalid?) oid for nisMap so that + replication does not break in a mixed version environment. + +Fixes: https://pagure.io/389-ds-base/issue/50933 + +Reviewed by: firstyear & tbordaz(Thanks!!) +--- + ldap/schema/10rfc2307compat.ldif | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif +index 78c588d08..8ba72e1e3 100644 +--- a/ldap/schema/10rfc2307compat.ldif ++++ b/ldap/schema/10rfc2307compat.ldif +@@ -253,7 +253,7 @@ objectClasses: ( + MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) + ) + objectClasses: ( +- 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL ++ 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL + DESC 'A generic abstraction of a NIS map' + MUST nisMapName + MAY description +-- +2.26.2 + diff --git a/SOURCES/0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch b/SOURCES/0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch deleted file mode 100644 index f3d3571..0000000 --- a/SOURCES/0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch +++ /dev/null @@ -1,96 +0,0 @@ -From 9710c327b3034d7a9d112306961c9cec98083df5 Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Mon, 18 May 2020 22:33:45 +0200 -Subject: [PATCH 05/12] Issue 51086 - Improve dscreate instance name validation - -Bug Description: When creating an instance using dscreate, it doesn't enforce -max name length. The ldapi socket name contains name of the instance. If it's -too long, we can hit limits, and the file name will be truncated. Also, it -doesn't sanitize the instance name, it's possible to create an instance with -non-ascii symbols in its name. 
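The bound enforced by the libglobs.c hunk below comes from where the LDAPI path ends up: the value of nsslapd-ldapifilepath is copied into (*ports_info.i_listenaddr)->local.path in main.c, a fixed-size field inside NSPR's PRNetAddr union, so the accepted length is capped at sizeof(PRNetAddr) - 8. A standalone illustration of that check follows; it is a sketch only, the helper name ldapi_path_fits() is invented, and <nspr.h> is assumed here as the NSPR umbrella header.

    #include <nspr.h>     /* PRNetAddr (assumed include path for NSPR) */
    #include <string.h>

    /*
     * Illustrative only -- mirrors the check added to
     * config_set_ldapi_filename() in the hunk below.  Anything at or over
     * sizeof(PRNetAddr) - 8 bytes would be truncated when copied into the
     * listen address, so such values are rejected.
     */
    static int
    ldapi_path_fits(const char *value)   /* hypothetical helper name */
    {
        size_t limit = sizeof(PRNetAddr) - 8;
        return strlen(value) < limit;
    }
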
- -Fix Description: Add more checks to 'dscreate from-file' installation. -Add a limitation for nsslapd-ldapifilepath string lenght because it is -limited by sizeof((*ports_info.i_listenaddr)->local.path)) it is copied to. - -https://pagure.io/389-ds-base/issue/51086 - -Reviewed by: firstyear, mreynolds (Thanks!) ---- - ldap/servers/slapd/libglobs.c | 12 ++++++++++++ - src/cockpit/389-console/src/ds.jsx | 8 ++++++-- - src/lib389/lib389/instance/setup.py | 9 +++++++++ - 3 files changed, 27 insertions(+), 2 deletions(-) - -diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c -index 0d3d9a924..fbf90d92d 100644 ---- a/ldap/servers/slapd/libglobs.c -+++ b/ldap/servers/slapd/libglobs.c -@@ -2390,11 +2390,23 @@ config_set_ldapi_filename(const char *attrname, char *value, char *errorbuf, int - { - int retVal = LDAP_SUCCESS; - slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); -+ /* -+ * LDAPI file path length is limited by sizeof((*ports_info.i_listenaddr)->local.path)) -+ * which is set in main.c inside of "#if defined(ENABLE_LDAPI)" block -+ * ports_info.i_listenaddr is sizeof(PRNetAddr) and our required sizes is 8 bytes less -+ */ -+ size_t result_size = sizeof(PRNetAddr) - 8; - - if (config_value_is_null(attrname, value, errorbuf, 0)) { - return LDAP_OPERATIONS_ERROR; - } - -+ if (strlen(value) >= result_size) { -+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: \"%s\" is invalid, its length must be less than %d", -+ attrname, value, result_size); -+ return LDAP_OPERATIONS_ERROR; -+ } -+ - if (apply) { - CFG_LOCK_WRITE(slapdFrontendConfig); - -diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx -index 90d9e5abd..53aa5cb79 100644 ---- a/src/cockpit/389-console/src/ds.jsx -+++ b/src/cockpit/389-console/src/ds.jsx -@@ -793,10 +793,14 @@ class CreateInstanceModal extends React.Component { - return; - } - newServerId = newServerId.replace(/^slapd-/i, ""); // strip "slapd-" -- if (newServerId.length > 128) { -+ if (newServerId === "admin") { -+ addNotification("warning", "Instance Name 'admin' is reserved, please choose a different name"); -+ return; -+ } -+ if (newServerId.length > 80) { - addNotification( - "warning", -- "Instance name is too long, it must not exceed 128 characters" -+ "Instance name is too long, it must not exceed 80 characters" - ); - return; - } -diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py -index 803992275..f5fc5495d 100644 ---- a/src/lib389/lib389/instance/setup.py -+++ b/src/lib389/lib389/instance/setup.py -@@ -567,6 +567,15 @@ class SetupDs(object): - - # We need to know the prefix before we can do the instance checks - assert_c(slapd['instance_name'] is not None, "Configuration instance_name in section [slapd] not found") -+ assert_c(len(slapd['instance_name']) <= 80, "Server identifier should not be longer than 80 symbols") -+ assert_c(all(ord(c) < 128 for c in slapd['instance_name']), "Server identifier can not contain non ascii characters") -+ assert_c(' ' not in slapd['instance_name'], "Server identifier can not contain a space") -+ assert_c(slapd['instance_name'] != 'admin', "Server identifier \"admin\" is reserved, please choose a different identifier") -+ -+ # Check that valid characters are used -+ safe = re.compile(r'^[#%:\w@_-]+$').search -+ assert_c(bool(safe(slapd['instance_name'])), "Server identifier has invalid characters, please choose a different value") -+ - # Check if the instance exists or not. - # Should I move this import? 
I think this prevents some recursion - from lib389 import DirSrv --- -2.26.2 - diff --git a/SOURCES/0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch b/SOURCES/0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch deleted file mode 100644 index 5bb0635..0000000 --- a/SOURCES/0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch +++ /dev/null @@ -1,254 +0,0 @@ -From c0cb15445c1434b3d317b1c06ab1a0ba8dbc6f04 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 19 May 2020 15:11:53 -0400 -Subject: [PATCH 06/12] Issue 51102 - RFE - ds-replcheck - make online timeout - configurable - -Bug Description: When doing an online check with replicas that are very - far apart the connection can time out as the hardcoded - timeout is 5 seconds. - -Fix Description: Change the default timeout to never timeout, and add an - CLI option to specify a specific timeout. - - Also caught all the possible LDAP exceptions so we can - cleanly "fail". Fixed some python syntax issues, and - improved the entry inconsistency report - -relates: https://pagure.io/389-ds-base/issue/51102 - -Reviewed by: firstyear & spichugi(Thanks!) ---- - ldap/admin/src/scripts/ds-replcheck | 90 ++++++++++++++++++----------- - 1 file changed, 57 insertions(+), 33 deletions(-) - -diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck -index 30bcfd65d..5bb7dfce3 100755 ---- a/ldap/admin/src/scripts/ds-replcheck -+++ b/ldap/admin/src/scripts/ds-replcheck -@@ -1,7 +1,7 @@ - #!/usr/bin/python3 - - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2018 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -21,10 +21,9 @@ import getpass - import signal - from ldif import LDIFRecordList - from ldap.ldapobject import SimpleLDAPObject --from ldap.cidict import cidict - from ldap.controls import SimplePagedResultsControl - from lib389._entry import Entry --from lib389.utils import ensure_str, ensure_list_str, ensure_int -+from lib389.utils import ensure_list_str, ensure_int - - VERSION = "2.0" - RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))' -@@ -185,11 +184,11 @@ def report_conflict(entry, attr, opts): - report = True - - if 'nscpentrywsi' in entry.data: -- found = False - for val in entry.data['nscpentrywsi']: - if val.lower().startswith(attr + ';'): - if (opts['starttime'] - extract_time(val)) <= opts['lag']: - report = False -+ break - - return report - -@@ -321,6 +320,9 @@ def ldif_search(LDIF, dn): - count = 0 - ignore_list = ['conflictcsn', 'modifytimestamp', 'modifiersname'] - val = "" -+ attr = "" -+ state_attr = "" -+ part_dn = "" - result['entry'] = None - result['conflict'] = None - result['tombstone'] = False -@@ -570,6 +572,7 @@ def cmp_entry(mentry, rentry, opts): - if val.lower().startswith(mattr + ';'): - if not found: - diff['diff'].append(" Master:") -+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip())) - diff['diff'].append(" - State Info: %s" % (val)) - diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val)))) - found = True -@@ -588,6 +591,7 @@ def cmp_entry(mentry, rentry, opts): - if val.lower().startswith(mattr + ';'): - if not found: - diff['diff'].append(" Replica:") -+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip())) - diff['diff'].append(" - State Info: %s" % (val)) - diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val)))) - found = True -@@ -654,7 +658,6 @@ def 
do_offline_report(opts, output_file=None): - rconflicts = [] - rtombstones = 0 - mtombstones = 0 -- idx = 0 - - # Open LDIF files - try: -@@ -926,7 +929,7 @@ def validate_suffix(ldapnode, suffix, hostname): - :return - True if suffix exists, otherwise False - """ - try: -- master_basesuffix = ldapnode.search_s(suffix, ldap.SCOPE_BASE ) -+ ldapnode.search_s(suffix, ldap.SCOPE_BASE) - except ldap.NO_SUCH_OBJECT: - print("Error: Failed to validate suffix in {}. {} does not exist.".format(hostname, suffix)) - return False -@@ -968,12 +971,12 @@ def connect_to_replicas(opts): - replica = SimpleLDAPObject(ruri) - - # Set timeouts -- master.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0) -- master.set_option(ldap.OPT_TIMEOUT,5.0) -- replica.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0) -- replica.set_option(ldap.OPT_TIMEOUT,5.0) -+ master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout']) -+ master.set_option(ldap.OPT_TIMEOUT, opts['timeout']) -+ replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout']) -+ replica.set_option(ldap.OPT_TIMEOUT, opts['timeout']) - -- # Setup Secure Conenction -+ # Setup Secure Connection - if opts['certdir'] is not None: - # Setup Master - if opts['mprotocol'] != LDAPI: -@@ -1003,7 +1006,7 @@ def connect_to_replicas(opts): - try: - master.simple_bind_s(opts['binddn'], opts['bindpw']) - except ldap.SERVER_DOWN as e: -- print("Cannot connect to %r" % muri) -+ print(f"Cannot connect to {muri} ({str(e)})") - sys.exit(1) - except ldap.LDAPError as e: - print("Error: Failed to authenticate to Master: ({}). " -@@ -1014,7 +1017,7 @@ def connect_to_replicas(opts): - try: - replica.simple_bind_s(opts['binddn'], opts['bindpw']) - except ldap.SERVER_DOWN as e: -- print("Cannot connect to %r" % ruri) -+ print(f"Cannot connect to {ruri} ({str(e)})") - sys.exit(1) - except ldap.LDAPError as e: - print("Error: Failed to authenticate to Replica: ({}). 
" -@@ -1218,7 +1221,6 @@ def do_online_report(opts, output_file=None): - """ - m_done = False - r_done = False -- done = False - report = {} - report['diff'] = [] - report['m_missing'] = [] -@@ -1257,15 +1259,22 @@ def do_online_report(opts, output_file=None): - - # Read the results and start comparing - while not m_done or not r_done: -- if not m_done: -- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid) -- elif not r_done: -- m_rdata = [] -- -- if not r_done: -- r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid) -- elif not m_done: -- r_rdata = [] -+ try: -+ if not m_done: -+ m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid) -+ elif not r_done: -+ m_rdata = [] -+ except ldap.LDAPError as e: -+ print("Error: Problem getting the results from the master: %s", str(e)) -+ sys.exit(1) -+ try: -+ if not r_done: -+ r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid) -+ elif not m_done: -+ r_rdata = [] -+ except ldap.LDAPError as e: -+ print("Error: Problem getting the results from the replica: %s", str(e)) -+ sys.exit(1) - - # Convert entries - mresult = convert_entries(m_rdata) -@@ -1291,11 +1300,15 @@ def do_online_report(opts, output_file=None): - ] - if m_pctrls: - if m_pctrls[0].cookie: -- # Copy cookie from response control to request control -- req_pr_ctrl.cookie = m_pctrls[0].cookie -- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, -- "(|(objectclass=*)(objectclass=ldapsubentry))", -- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls) -+ try: -+ # Copy cookie from response control to request control -+ req_pr_ctrl.cookie = m_pctrls[0].cookie -+ master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, -+ "(|(objectclass=*)(objectclass=ldapsubentry))", -+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls) -+ except ldap.LDAPError as e: -+ print("Error: Problem searching the master: %s", str(e)) -+ sys.exit(1) - else: - m_done = True # No more pages available - else: -@@ -1311,11 +1324,15 @@ def do_online_report(opts, output_file=None): - - if r_pctrls: - if r_pctrls[0].cookie: -- # Copy cookie from response control to request control -- req_pr_ctrl.cookie = r_pctrls[0].cookie -- replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, -- "(|(objectclass=*)(objectclass=ldapsubentry))", -- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls) -+ try: -+ # Copy cookie from response control to request control -+ req_pr_ctrl.cookie = r_pctrls[0].cookie -+ replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE, -+ "(|(objectclass=*)(objectclass=ldapsubentry))", -+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls) -+ except ldap.LDAPError as e: -+ print("Error: Problem searching the replica: %s", str(e)) -+ sys.exit(1) - else: - r_done = True # No more pages available - else: -@@ -1426,6 +1443,9 @@ def init_online_params(args): - # prompt for password - opts['bindpw'] = getpass.getpass('Enter password: ') - -+ # lastly handle the timeout -+ opts['timeout'] = int(args.timeout) -+ - return opts - - -@@ -1553,6 +1573,8 @@ def main(): - state_parser.add_argument('-y', '--pass-file', help='A text file containing the clear text password for the bind dn', dest='pass_file', default=None) - state_parser.add_argument('-Z', '--cert-dir', help='The certificate database directory for 
secure connections', - dest='certdir', default=None) -+ state_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.', -+ type=int, dest='timeout', default=-1) - - # Online mode - online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences") -@@ -1577,6 +1599,8 @@ def main(): - online_parser.add_argument('-p', '--page-size', help='The paged-search result grouping size (default 500 entries)', - dest='pagesize', default=500) - online_parser.add_argument('-o', '--out-file', help='The output file', dest='file', default=None) -+ online_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.', -+ type=int, dest='timeout', default=-1) - - # Offline LDIF mode - offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')") --- -2.26.2 - diff --git a/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch b/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch new file mode 100644 index 0000000..4269446 --- /dev/null +++ b/SOURCES/0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch @@ -0,0 +1,147 @@ +From 1085823bf5586d55103cfba249fdf212e9afcb7c Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Thu, 4 Jun 2020 11:51:53 +1000 +Subject: [PATCH] Ticket 51131 - improve mutex alloc in conntable + +Bug Description: We previously did delayed allocation +of mutexs, which @tbordaz noted can lead to high usage +of the pthread mutex init routines. This was done under +the conntable lock, as well as cleaning the connection + +Fix Description: rather than delayed allocation, we +initialise everything at start up instead, which means +that while startup may have a delay, at run time we have +a smaller and lighter connection allocation routine, +that is able to release the CT lock sooner. + +https://pagure.io/389-ds-base/issue/51131 + +Author: William Brown + +Review by: ??? +--- + ldap/servers/slapd/conntable.c | 86 +++++++++++++++++++--------------- + 1 file changed, 47 insertions(+), 39 deletions(-) + +diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c +index b23dc3435..feb9c0d75 100644 +--- a/ldap/servers/slapd/conntable.c ++++ b/ldap/servers/slapd/conntable.c +@@ -138,10 +138,21 @@ connection_table_new(int table_size) + ct->conn_next_offset = 1; + ct->conn_free_offset = 1; + ++ pthread_mutexattr_t monitor_attr = {0}; ++ pthread_mutexattr_init(&monitor_attr); ++ pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE); ++ + /* We rely on the fact that we called calloc, which zeros the block, so we don't + * init any structure element unless a zero value is troublesome later + */ + for (i = 0; i < table_size; i++) { ++ /* ++ * Technically this is a no-op due to calloc, but we should always be ++ * careful with things like this .... ++ */ ++ ct->c[i].c_state = CONN_STATE_FREE; ++ /* Start the conn setup. */ ++ + LBER_SOCKET invalid_socket; + /* DBDB---move this out of here once everything works */ + ct->c[i].c_sb = ber_sockbuf_alloc(); +@@ -161,11 +172,20 @@ connection_table_new(int table_size) + ct->c[i].c_prev = NULL; + ct->c[i].c_ci = i; + ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX; +- /* +- * Technically this is a no-op due to calloc, but we should always be +- * careful with things like this .... 
+- */ +- ct->c[i].c_state = CONN_STATE_FREE; ++ ++ if (pthread_mutex_init(&(ct->c[i].c_mutex), &monitor_attr) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n"); ++ exit(1); ++ } ++ ++ ct->c[i].c_pdumutex = PR_NewLock(); ++ if (ct->c[i].c_pdumutex == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n"); ++ exit(1); ++ } ++ ++ /* Ready to rock, mark as such. */ ++ ct->c[i].c_state = CONN_STATE_INIT; + /* Prepare the connection into the freelist. */ + ct->c_freelist[i] = &(ct->c[i]); + } +@@ -241,44 +261,32 @@ connection_table_get_connection(Connection_Table *ct, int sd) + /* Never use slot 0 */ + ct->conn_next_offset += 1; + } +- /* Now prep the slot for usage. */ +- PR_ASSERT(c->c_next == NULL); +- PR_ASSERT(c->c_prev == NULL); +- PR_ASSERT(c->c_extension == NULL); +- +- if (c->c_state == CONN_STATE_FREE) { +- +- c->c_state = CONN_STATE_INIT; +- +- pthread_mutexattr_t monitor_attr = {0}; +- pthread_mutexattr_init(&monitor_attr); +- pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE); +- if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n"); +- exit(1); +- } +- +- c->c_pdumutex = PR_NewLock(); +- if (c->c_pdumutex == NULL) { +- c->c_pdumutex = NULL; +- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n"); +- exit(1); +- } +- } +- /* Let's make sure there's no cruft left on there from the last time this connection was used. */ +- /* Note: no need to lock c->c_mutex because this function is only +- * called by one thread (the slapd_daemon thread), and if we got this +- * far then `c' is not being used by any operation threads, etc. +- */ +- connection_cleanup(c); +- c->c_ct = ct; /* pointer to connection table that owns this connection */ ++ PR_Unlock(ct->table_mutex); + } else { +- /* couldn't find a Connection */ ++ /* couldn't find a Connection, table must be full */ + slapi_log_err(SLAPI_LOG_CONNS, "connection_table_get_connection", "Max open connections reached\n"); ++ PR_Unlock(ct->table_mutex); ++ return NULL; + } + +- /* We could move this to before the c alloc as there is no point to remain here. */ +- PR_Unlock(ct->table_mutex); ++ /* Now prep the slot for usage. */ ++ PR_ASSERT(c != NULL); ++ PR_ASSERT(c->c_next == NULL); ++ PR_ASSERT(c->c_prev == NULL); ++ PR_ASSERT(c->c_extension == NULL); ++ PR_ASSERT(c->c_state == CONN_STATE_INIT); ++ /* Let's make sure there's no cruft left on there from the last time this connection was used. */ ++ ++ /* ++ * Note: no need to lock c->c_mutex because this function is only ++ * called by one thread (the slapd_daemon thread), and if we got this ++ * far then `c' is not being used by any operation threads, etc. The ++ * memory ordering will be provided by the work queue sending c to a ++ * thread. 
++ */ ++ connection_cleanup(c); ++ /* pointer to connection table that owns this connection */ ++ c->c_ct = ct; + + return c; + } +-- +2.26.2 + diff --git a/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch b/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch new file mode 100644 index 0000000..41f9315 --- /dev/null +++ b/SOURCES/0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch @@ -0,0 +1,66 @@ +From a9f53e9958861e6a7a827bd852d72d51a6512396 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Wed, 25 Nov 2020 18:07:34 +0100 +Subject: [PATCH] Issue 4297 - 2nd fix for on ADD replication URP issue + internal searches with filter containing unescaped chars (#4439) + +Bug description: + Previous fix is buggy because slapi_filter_escape_filter_value returns + a escaped filter component not an escaped assertion value. + +Fix description: + use the escaped filter component + +relates: https://github.com/389ds/389-ds-base/issues/4297 + +Reviewed by: William Brown + +Platforms tested: F31 +--- + ldap/servers/plugins/replication/urp.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c +index f41dbc72d..ed340c9d8 100644 +--- a/ldap/servers/plugins/replication/urp.c ++++ b/ldap/servers/plugins/replication/urp.c +@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry, + Slapi_Entry **entries = NULL; + Slapi_PBlock *newpb; + char *basedn = slapi_entry_get_ndn(entry); +- char *escaped_basedn; ++ char *escaped_filter; + const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry)); +- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn); ++ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn); + +- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn); +- slapi_ch_free((void **)&escaped_basedn); ++ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter); ++ slapi_ch_free((void **)&escaped_filter); + newpb = slapi_pblock_new(); + slapi_search_internal_set_pb(newpb, + slapi_sdn_get_dn(suffix), /* Base DN */ +@@ -1605,15 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr + Slapi_Entry **entries = NULL; + Slapi_PBlock *newpb; + const char *basedn = slapi_sdn_get_dn(parentdn); +- char *escaped_basedn; +- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn); ++ char *escaped_filter; ++ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn); + + char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn"); + CSN *conflict_csn = csn_new_by_string(conflict_csnstr); + CSN *tombstone_csn = NULL; + +- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn); +- slapi_ch_free((void **)&escaped_basedn); ++ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter); ++ slapi_ch_free((void **)&escaped_filter); + newpb = slapi_pblock_new(); + char *parent_dn = slapi_dn_parent (basedn); + slapi_search_internal_set_pb(newpb, +-- +2.26.2 + diff --git a/SOURCES/0007-Issue-51110-Fix-ASAN-ODR-warnings.patch b/SOURCES/0007-Issue-51110-Fix-ASAN-ODR-warnings.patch deleted file mode 100644 index df8423c..0000000 --- a/SOURCES/0007-Issue-51110-Fix-ASAN-ODR-warnings.patch +++ /dev/null @@ -1,428 
+0,0 @@ -From a1cd3cf8e8b6b33ab21d5338921187a76dd9dcd0 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 22 May 2020 15:41:45 -0400 -Subject: [PATCH 07/12] Issue 51110 - Fix ASAN ODR warnings - -Description: Fixed ODR issues with glboal attributes which were duplicated from - the core server into the replication and retrocl plugins. - -relates: https://pagure.io/389-ds-base/issue/51110 - -Reviewed by: firstyear(Thanks!) ---- - ldap/servers/plugins/replication/repl5.h | 17 +++--- - .../plugins/replication/repl_globals.c | 17 +++--- - ldap/servers/plugins/replication/replutil.c | 16 +++--- - ldap/servers/plugins/retrocl/retrocl.h | 22 ++++---- - ldap/servers/plugins/retrocl/retrocl_cn.c | 12 ++--- - ldap/servers/plugins/retrocl/retrocl_po.c | 52 +++++++++---------- - ldap/servers/plugins/retrocl/retrocl_trim.c | 30 +++++------ - 7 files changed, 82 insertions(+), 84 deletions(-) - -diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h -index 873dd8a16..72b7089e3 100644 ---- a/ldap/servers/plugins/replication/repl5.h -+++ b/ldap/servers/plugins/replication/repl5.h -@@ -280,15 +280,14 @@ struct berval *NSDS90StartReplicationRequest_new(const char *protocol_oid, - int multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb); - - /* From repl_globals.c */ --extern char *attr_changenumber; --extern char *attr_targetdn; --extern char *attr_changetype; --extern char *attr_newrdn; --extern char *attr_deleteoldrdn; --extern char *attr_changes; --extern char *attr_newsuperior; --extern char *attr_changetime; --extern char *attr_dataversion; -+extern char *repl_changenumber; -+extern char *repl_targetdn; -+extern char *repl_changetype; -+extern char *repl_newrdn; -+extern char *repl_deleteoldrdn; -+extern char *repl_changes; -+extern char *repl_newsuperior; -+extern char *repl_changetime; - extern char *attr_csn; - extern char *changetype_add; - extern char *changetype_delete; -diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c -index 355a0ffa1..c615c77da 100644 ---- a/ldap/servers/plugins/replication/repl_globals.c -+++ b/ldap/servers/plugins/replication/repl_globals.c -@@ -48,15 +48,14 @@ char *changetype_delete = CHANGETYPE_DELETE; - char *changetype_modify = CHANGETYPE_MODIFY; - char *changetype_modrdn = CHANGETYPE_MODRDN; - char *changetype_moddn = CHANGETYPE_MODDN; --char *attr_changenumber = ATTR_CHANGENUMBER; --char *attr_targetdn = ATTR_TARGETDN; --char *attr_changetype = ATTR_CHANGETYPE; --char *attr_newrdn = ATTR_NEWRDN; --char *attr_deleteoldrdn = ATTR_DELETEOLDRDN; --char *attr_changes = ATTR_CHANGES; --char *attr_newsuperior = ATTR_NEWSUPERIOR; --char *attr_changetime = ATTR_CHANGETIME; --char *attr_dataversion = ATTR_DATAVERSION; -+char *repl_changenumber = ATTR_CHANGENUMBER; -+char *repl_targetdn = ATTR_TARGETDN; -+char *repl_changetype = ATTR_CHANGETYPE; -+char *repl_newrdn = ATTR_NEWRDN; -+char *repl_deleteoldrdn = ATTR_DELETEOLDRDN; -+char *repl_changes = ATTR_CHANGES; -+char *repl_newsuperior = ATTR_NEWSUPERIOR; -+char *repl_changetime = ATTR_CHANGETIME; - char *attr_csn = ATTR_CSN; - char *type_copyingFrom = TYPE_COPYINGFROM; - char *type_copiedFrom = TYPE_COPIEDFROM; -diff --git a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c -index de1e77880..39f821d12 100644 ---- a/ldap/servers/plugins/replication/replutil.c -+++ b/ldap/servers/plugins/replication/replutil.c -@@ -64,14 +64,14 @@ get_cleattrs() - { - if (cleattrs[0] == NULL) 
{ - cleattrs[0] = type_objectclass; -- cleattrs[1] = attr_changenumber; -- cleattrs[2] = attr_targetdn; -- cleattrs[3] = attr_changetype; -- cleattrs[4] = attr_newrdn; -- cleattrs[5] = attr_deleteoldrdn; -- cleattrs[6] = attr_changes; -- cleattrs[7] = attr_newsuperior; -- cleattrs[8] = attr_changetime; -+ cleattrs[1] = repl_changenumber; -+ cleattrs[2] = repl_targetdn; -+ cleattrs[3] = repl_changetype; -+ cleattrs[4] = repl_newrdn; -+ cleattrs[5] = repl_deleteoldrdn; -+ cleattrs[6] = repl_changes; -+ cleattrs[7] = repl_newsuperior; -+ cleattrs[8] = repl_changetime; - cleattrs[9] = NULL; - } - return cleattrs; -diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h -index 06482a14c..2ce76fcec 100644 ---- a/ldap/servers/plugins/retrocl/retrocl.h -+++ b/ldap/servers/plugins/retrocl/retrocl.h -@@ -94,17 +94,17 @@ extern int retrocl_nattributes; - extern char **retrocl_attributes; - extern char **retrocl_aliases; - --extern const char *attr_changenumber; --extern const char *attr_targetdn; --extern const char *attr_changetype; --extern const char *attr_newrdn; --extern const char *attr_newsuperior; --extern const char *attr_deleteoldrdn; --extern const char *attr_changes; --extern const char *attr_changetime; --extern const char *attr_objectclass; --extern const char *attr_nsuniqueid; --extern const char *attr_isreplicated; -+extern const char *retrocl_changenumber; -+extern const char *retrocl_targetdn; -+extern const char *retrocl_changetype; -+extern const char *retrocl_newrdn; -+extern const char *retrocl_newsuperior; -+extern const char *retrocl_deleteoldrdn; -+extern const char *retrocl_changes; -+extern const char *retrocl_changetime; -+extern const char *retrocl_objectclass; -+extern const char *retrocl_nsuniqueid; -+extern const char *retrocl_isreplicated; - - extern PRLock *retrocl_internal_lock; - extern Slapi_RWLock *retrocl_cn_lock; -diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c -index 709d7a857..5fc5f586d 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_cn.c -+++ b/ldap/servers/plugins/retrocl/retrocl_cn.c -@@ -62,7 +62,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data) - Slapi_Attr *chattr = NULL; - sval = NULL; - value = NULL; -- if (slapi_entry_attr_find(e, attr_changenumber, &chattr) == 0) { -+ if (slapi_entry_attr_find(e, retrocl_changenumber, &chattr) == 0) { - slapi_attr_first_value(chattr, &sval); - if (NULL != sval) { - value = slapi_value_get_berval(sval); -@@ -79,7 +79,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data) - chattr = NULL; - sval = NULL; - value = NULL; -- if (slapi_entry_attr_find(e, attr_changetime, &chattr) == 0) { -+ if (slapi_entry_attr_find(e, retrocl_changetime, &chattr) == 0) { - slapi_attr_first_value(chattr, &sval); - if (NULL != sval) { - value = slapi_value_get_berval(sval); -@@ -134,7 +134,7 @@ retrocl_get_changenumbers(void) - cr.cr_time = 0; - - slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_FIRST, -- (char *)attr_changenumber, /* cast away const */ -+ (char *)retrocl_changenumber, /* cast away const */ - NULL, NULL, 0, &cr, NULL, handle_cnum_result, - handle_cnum_entry, NULL); - -@@ -144,7 +144,7 @@ retrocl_get_changenumbers(void) - slapi_ch_free((void **)&cr.cr_time); - - slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST, -- (char *)attr_changenumber, /* cast away const */ -+ (char *)retrocl_changenumber, /* cast away const */ - NULL, NULL, 0, &cr, NULL, handle_cnum_result, - handle_cnum_entry, NULL); - -@@ -185,7 +185,7 
@@ retrocl_getchangetime(int type, int *err) - return NO_TIME; - } - slapi_seq_callback(RETROCL_CHANGELOG_DN, type, -- (char *)attr_changenumber, /* cast away const */ -+ (char *)retrocl_changenumber, /* cast away const */ - NULL, - NULL, 0, &cr, NULL, - handle_cnum_result, handle_cnum_entry, NULL); -@@ -353,7 +353,7 @@ retrocl_update_lastchangenumber(void) - cr.cr_cnum = 0; - cr.cr_time = 0; - slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST, -- (char *)attr_changenumber, /* cast away const */ -+ (char *)retrocl_changenumber, /* cast away const */ - NULL, NULL, 0, &cr, NULL, handle_cnum_result, - handle_cnum_entry, NULL); - -diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c -index d2af79b31..e1488f56b 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_po.c -+++ b/ldap/servers/plugins/retrocl/retrocl_po.c -@@ -25,17 +25,17 @@ modrdn2reple(Slapi_Entry *e, const char *newrdn, int deloldrdn, LDAPMod **ldm, c - - /******************************/ - --const char *attr_changenumber = "changenumber"; --const char *attr_targetdn = "targetdn"; --const char *attr_changetype = "changetype"; --const char *attr_newrdn = "newrdn"; --const char *attr_deleteoldrdn = "deleteoldrdn"; --const char *attr_changes = "changes"; --const char *attr_newsuperior = "newsuperior"; --const char *attr_changetime = "changetime"; --const char *attr_objectclass = "objectclass"; --const char *attr_nsuniqueid = "nsuniqueid"; --const char *attr_isreplicated = "isreplicated"; -+const char *retrocl_changenumber = "changenumber"; -+const char *retrocl_targetdn = "targetdn"; -+const char *retrocl_changetype = "changetype"; -+const char *retrocl_newrdn = "newrdn"; -+const char *retrocl_deleteoldrdn = "deleteoldrdn"; -+const char *retrocl_changes = "changes"; -+const char *retrocl_newsuperior = "newsuperior"; -+const char *retrocl_changetime = "changetime"; -+const char *retrocl_objectclass = "objectclass"; -+const char *retrocl_nsuniqueid = "nsuniqueid"; -+const char *retrocl_isreplicated = "isreplicated"; - - /* - * Function: make_changes_string -@@ -185,7 +185,7 @@ write_replog_db( - changenum, dn); - - /* Construct the dn of this change record */ -- edn = slapi_ch_smprintf("%s=%lu,%s", attr_changenumber, changenum, RETROCL_CHANGELOG_DN); -+ edn = slapi_ch_smprintf("%s=%lu,%s", retrocl_changenumber, changenum, RETROCL_CHANGELOG_DN); - - /* - * Create the entry struct, and fill in fields common to all types -@@ -214,7 +214,7 @@ write_replog_db( - attributeAlias = attributeName; - } - -- if (strcasecmp(attributeName, attr_nsuniqueid) == 0) { -+ if (strcasecmp(attributeName, retrocl_nsuniqueid) == 0) { - Slapi_Entry *entry = NULL; - const char *uniqueId = NULL; - -@@ -236,7 +236,7 @@ write_replog_db( - - extensibleObject = 1; - -- } else if (strcasecmp(attributeName, attr_isreplicated) == 0) { -+ } else if (strcasecmp(attributeName, retrocl_isreplicated) == 0) { - int isReplicated = 0; - char *attributeValue = NULL; - -@@ -298,17 +298,17 @@ write_replog_db( - sprintf(chnobuf, "%lu", changenum); - val.bv_val = chnobuf; - val.bv_len = strlen(chnobuf); -- slapi_entry_add_values(e, attr_changenumber, vals); -+ slapi_entry_add_values(e, retrocl_changenumber, vals); - - /* Set the targetentrydn attribute */ - val.bv_val = dn; - val.bv_len = strlen(dn); -- slapi_entry_add_values(e, attr_targetdn, vals); -+ slapi_entry_add_values(e, retrocl_targetdn, vals); - - /* Set the changeTime attribute */ - val.bv_val = format_genTime(curtime); - val.bv_len = strlen(val.bv_val); -- 
slapi_entry_add_values(e, attr_changetime, vals); -+ slapi_entry_add_values(e, retrocl_changetime, vals); - slapi_ch_free((void **)&val.bv_val); - - /* -@@ -344,7 +344,7 @@ write_replog_db( - /* Set the changetype attribute */ - val.bv_val = "delete"; - val.bv_len = 6; -- slapi_entry_add_values(e, attr_changetype, vals); -+ slapi_entry_add_values(e, retrocl_changetype, vals); - } - break; - -@@ -422,7 +422,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype) - } else { - return (1); - } -- slapi_entry_add_values(e, attr_changetype, vals); -+ slapi_entry_add_values(e, retrocl_changetype, vals); - - estr = slapi_entry2str(oe, &len); - p = estr; -@@ -435,7 +435,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype) - } - val.bv_val = p; - val.bv_len = len - (p - estr); /* length + terminating \0 */ -- slapi_entry_add_values(e, attr_changes, vals); -+ slapi_entry_add_values(e, retrocl_changes, vals); - slapi_ch_free_string(&estr); - return 0; - } -@@ -471,7 +471,7 @@ mods2reple(Slapi_Entry *e, LDAPMod **ldm) - if (NULL != l) { - val.bv_val = l->ls_buf; - val.bv_len = l->ls_len + 1; /* string + terminating \0 */ -- slapi_entry_add_values(e, attr_changes, vals); -+ slapi_entry_add_values(e, retrocl_changes, vals); - lenstr_free(&l); - } - } -@@ -511,12 +511,12 @@ modrdn2reple( - - val.bv_val = "modrdn"; - val.bv_len = 6; -- slapi_entry_add_values(e, attr_changetype, vals); -+ slapi_entry_add_values(e, retrocl_changetype, vals); - - if (newrdn) { - val.bv_val = (char *)newrdn; /* cast away const */ - val.bv_len = strlen(newrdn); -- slapi_entry_add_values(e, attr_newrdn, vals); -+ slapi_entry_add_values(e, retrocl_newrdn, vals); - } - - if (deloldrdn == 0) { -@@ -526,12 +526,12 @@ modrdn2reple( - val.bv_val = "TRUE"; - val.bv_len = 4; - } -- slapi_entry_add_values(e, attr_deleteoldrdn, vals); -+ slapi_entry_add_values(e, retrocl_deleteoldrdn, vals); - - if (newsuperior) { - val.bv_val = (char *)newsuperior; /* cast away const */ - val.bv_len = strlen(newsuperior); -- slapi_entry_add_values(e, attr_newsuperior, vals); -+ slapi_entry_add_values(e, retrocl_newsuperior, vals); - } - - if (NULL != ldm) { -@@ -540,7 +540,7 @@ modrdn2reple( - if (l->ls_len) { - val.bv_val = l->ls_buf; - val.bv_len = l->ls_len; -- slapi_entry_add_values(e, attr_changes, vals); -+ slapi_entry_add_values(e, retrocl_changes, vals); - } - lenstr_free(&l); - } -diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c -index 0378eb7f6..d031dc3f8 100644 ---- a/ldap/servers/plugins/retrocl/retrocl_trim.c -+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c -@@ -49,15 +49,15 @@ static const char ** - get_cleattrs(void) - { - if (cleattrs[0] == NULL) { -- cleattrs[0] = attr_objectclass; -- cleattrs[1] = attr_changenumber; -- cleattrs[2] = attr_targetdn; -- cleattrs[3] = attr_changetype; -- cleattrs[4] = attr_newrdn; -- cleattrs[5] = attr_deleteoldrdn; -- cleattrs[6] = attr_changes; -- cleattrs[7] = attr_newsuperior; -- cleattrs[8] = attr_changetime; -+ cleattrs[0] = retrocl_objectclass; -+ cleattrs[1] = retrocl_changenumber; -+ cleattrs[2] = retrocl_targetdn; -+ cleattrs[3] = retrocl_changetype; -+ cleattrs[4] = retrocl_newrdn; -+ cleattrs[5] = retrocl_deleteoldrdn; -+ cleattrs[6] = retrocl_changes; -+ cleattrs[7] = retrocl_newsuperior; -+ cleattrs[8] = retrocl_changetime; - cleattrs[9] = NULL; - } - return cleattrs; -@@ -81,7 +81,7 @@ delete_changerecord(changeNumber cnum) - char *dnbuf; - int delrc; - -- dnbuf = slapi_ch_smprintf("%s=%ld, %s", attr_changenumber, cnum, -+ 
dnbuf = slapi_ch_smprintf("%s=%ld, %s", retrocl_changenumber, cnum, - RETROCL_CHANGELOG_DN); - pb = slapi_pblock_new(); - slapi_delete_internal_set_pb(pb, dnbuf, NULL /*controls*/, NULL /* uniqueid */, -@@ -154,7 +154,7 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data) - if (NULL != e) { - Slapi_Value *sval = NULL; - const struct berval *val = NULL; -- rc = slapi_entry_attr_find(e, attr_changetime, &attr); -+ rc = slapi_entry_attr_find(e, retrocl_changetime, &attr); - /* Bug 624442: Logic checking for lack of timestamp was - reversed. */ - if (0 != rc || slapi_attr_first_value(attr, &sval) == -1 || -@@ -174,14 +174,14 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data) - /* - * Function: get_changetime - * Arguments: cnum - number of change record to retrieve -- * Returns: Taking the attr_changetime of the 'cnum' entry, -+ * Returns: Taking the retrocl_changetime of the 'cnum' entry, - * it converts it into time_t (parse_localTime) and returns this time value. - * It returns 0 in the following cases: -- * - changerecord entry has not attr_changetime -+ * - changerecord entry has not retrocl_changetime - * - attr_changetime attribute has no value - * - attr_changetime attribute value is empty - * -- * Description: Retrieve attr_changetime ("changetime") from a changerecord whose number is "cnum". -+ * Description: Retrieve retrocl_changetime ("changetime") from a changerecord whose number is "cnum". - */ - static time_t - get_changetime(changeNumber cnum, int *err) -@@ -198,7 +198,7 @@ get_changetime(changeNumber cnum, int *err) - } - crtp->crt_nentries = crtp->crt_err = 0; - crtp->crt_time = 0; -- PR_snprintf(fstr, sizeof(fstr), "%s=%ld", attr_changenumber, cnum); -+ PR_snprintf(fstr, sizeof(fstr), "%s=%ld", retrocl_changenumber, cnum); - - pb = slapi_pblock_new(); - slapi_search_internal_set_pb(pb, RETROCL_CHANGELOG_DN, --- -2.26.2 - diff --git a/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch b/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch new file mode 100644 index 0000000..9bca531 --- /dev/null +++ b/SOURCES/0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch @@ -0,0 +1,502 @@ +From 4faec52810e12070ef72da347bb590c57d8761e4 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 20 Nov 2020 17:47:18 -0500 +Subject: [PATCH 1/2] Issue 3657 - Add options to dsctl for dsrc file + +Description: Add options to create, modify, delete, and display + the .dsrc CLI tool shortcut file. + +Relates: https://github.com/389ds/389-ds-base/issues/3657 + +Reviewed by: firstyear(Thanks!) 
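[Editor's note] For reference, the file that the new "dsctl <instance> dsrc create" subcommand writes is a plain INI file in the caller's home directory, so it can be inspected without lib389 at all. The following is a minimal read-back sketch using only the standard-library configparser; it assumes a ~/.dsrc produced by the new subcommand already exists and simply mirrors what display_dsrc() prints:

    import configparser
    from os.path import expanduser, exists

    dsrc_file = f'{expanduser("~")}/.dsrc'   # same path the patch uses
    if exists(dsrc_file):
        config = configparser.ConfigParser()
        config.read(dsrc_file)
        # One section per instance (e.g. [localhost]) holding uri/basedn/binddn/...
        for section in config.sections():
            print(f'[{section}]')
            for key, value in config[section].items():
                print(f'{key} = {value}')
    else:
        print(f'No dsrc file found at {dsrc_file}')

For example, after something like "dsctl localhost dsrc create --basedn dc=example,dc=com --binddn 'cn=Directory Manager' --do-it" (the instance name here is illustrative), the script prints that instance's section with those two keys, matching the output of the new display subcommand.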
+--- + dirsrvtests/tests/suites/clu/dsrc_test.py | 136 ++++++++++ + src/lib389/cli/dsctl | 2 + + src/lib389/lib389/cli_ctl/dsrc.py | 312 ++++++++++++++++++++++ + 3 files changed, 450 insertions(+) + create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py + create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py + +diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py +new file mode 100644 +index 000000000..1b27700ec +--- /dev/null ++++ b/dirsrvtests/tests/suites/clu/dsrc_test.py +@@ -0,0 +1,136 @@ ++import logging ++import pytest ++import os ++from os.path import expanduser ++from lib389.cli_base import FakeArgs ++from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc ++from lib389._constants import DEFAULT_SUFFIX, DN_DM ++from lib389.topologies import topology_st as topo ++ ++log = logging.getLogger(__name__) ++ ++ ++@pytest.fixture(scope="function") ++def setup(topo, request): ++ """Preserve any existing .dsrc file""" ++ ++ dsrc_file = f'{expanduser("~")}/.dsrc' ++ backup_file = dsrc_file + ".original" ++ if os.path.exists(dsrc_file): ++ os.rename(dsrc_file, backup_file) ++ ++ def fin(): ++ if os.path.exists(backup_file): ++ os.rename(backup_file, dsrc_file) ++ ++ request.addfinalizer(fin) ++ ++ ++def test_dsrc(topo, setup): ++ """Test "dsctl dsrc" command ++ ++ :id: 0610de6c-e167-4761-bdab-3e677b2d44bb ++ :setup: Standalone Instance ++ :steps: ++ 1. Test creation works ++ 2. Test creating duplicate section ++ 3. Test adding an additional inst config works ++ 4. Test removing an instance works ++ 5. Test modify works ++ 6. Test delete works ++ 7. Test display fails when no file is present ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. 
Success ++ """ ++ ++ inst = topo.standalone ++ serverid = inst.serverid ++ second_inst_name = "Second" ++ second_inst_basedn = "o=second" ++ different_suffix = "o=different" ++ ++ # Setup our args ++ args = FakeArgs() ++ args.basedn = DEFAULT_SUFFIX ++ args.binddn = DN_DM ++ args.json = None ++ args.uri = None ++ args.saslmech = None ++ args.tls_cacertdir = None ++ args.tls_cert = None ++ args.tls_key = None ++ args.tls_reqcert = None ++ args.starttls = None ++ args.cancel_starttls = None ++ args.pwdfile = None ++ args.do_it = True ++ ++ # Create a dsrc configuration entry ++ create_dsrc(inst, log, args) ++ display_dsrc(inst, topo.logcap.log, args) ++ assert topo.logcap.contains("basedn = " + args.basedn) ++ assert topo.logcap.contains("binddn = " + args.binddn) ++ assert topo.logcap.contains("[" + serverid + "]") ++ topo.logcap.flush() ++ ++ # Attempt to add duplicate instance section ++ with pytest.raises(ValueError): ++ create_dsrc(inst, log, args) ++ ++ # Test adding a second instance works correctly ++ inst.serverid = second_inst_name ++ args.basedn = second_inst_basedn ++ create_dsrc(inst, log, args) ++ display_dsrc(inst, topo.logcap.log, args) ++ assert topo.logcap.contains("basedn = " + args.basedn) ++ assert topo.logcap.contains("[" + second_inst_name + "]") ++ topo.logcap.flush() ++ ++ # Delete second instance ++ delete_dsrc(inst, log, args) ++ inst.serverid = serverid # Restore original instance name ++ display_dsrc(inst, topo.logcap.log, args) ++ assert not topo.logcap.contains("[" + second_inst_name + "]") ++ assert not topo.logcap.contains("basedn = " + args.basedn) ++ # Make sure first instance config is still present ++ assert topo.logcap.contains("[" + serverid + "]") ++ assert topo.logcap.contains("binddn = " + args.binddn) ++ topo.logcap.flush() ++ ++ # Modify the config ++ args.basedn = different_suffix ++ modify_dsrc(inst, log, args) ++ display_dsrc(inst, topo.logcap.log, args) ++ assert topo.logcap.contains(different_suffix) ++ topo.logcap.flush() ++ ++ # Remove an arg from the config ++ args.basedn = "" ++ modify_dsrc(inst, log, args) ++ display_dsrc(inst, topo.logcap.log, args) ++ assert not topo.logcap.contains(different_suffix) ++ topo.logcap.flush() ++ ++ # Remove the last entry, which should delete the file ++ delete_dsrc(inst, log, args) ++ dsrc_file = f'{expanduser("~")}/.dsrc' ++ assert not os.path.exists(dsrc_file) ++ ++ # Make sure display fails ++ with pytest.raises(ValueError): ++ display_dsrc(inst, log, args) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) ++ +diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl +index fe9bc10e9..69f069297 100755 +--- a/src/lib389/cli/dsctl ++++ b/src/lib389/cli/dsctl +@@ -23,6 +23,7 @@ from lib389.cli_ctl import tls as cli_tls + from lib389.cli_ctl import health as cli_health + from lib389.cli_ctl import nsstate as cli_nsstate + from lib389.cli_ctl import dbgen as cli_dbgen ++from lib389.cli_ctl import dsrc as cli_dsrc + from lib389.cli_ctl.instance import instance_remove_all + from lib389.cli_base import ( + disconnect_instance, +@@ -61,6 +62,7 @@ cli_tls.create_parser(subparsers) + cli_health.create_parser(subparsers) + cli_nsstate.create_parser(subparsers) + cli_dbgen.create_parser(subparsers) ++cli_dsrc.create_parser(subparsers) + + argcomplete.autocomplete(parser) + +diff --git a/src/lib389/lib389/cli_ctl/dsrc.py b/src/lib389/lib389/cli_ctl/dsrc.py +new file mode 100644 +index 000000000..e49c7f819 +--- 
/dev/null ++++ b/src/lib389/lib389/cli_ctl/dsrc.py +@@ -0,0 +1,312 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import json ++from os.path import expanduser ++from os import path, remove ++from ldapurl import isLDAPUrl ++from ldap.dn import is_dn ++import configparser ++ ++ ++def create_dsrc(inst, log, args): ++ """Create the .dsrc file ++ ++ [instance] ++ uri = ldaps://hostname:port ++ basedn = dc=example,dc=com ++ binddn = uid=user,.... ++ saslmech = [EXTERNAL|PLAIN] ++ tls_cacertdir = /path/to/cacertdir ++ tls_cert = /path/to/user.crt ++ tls_key = /path/to/user.key ++ tls_reqcert = [never, hard, allow] ++ starttls = [true, false] ++ pwdfile = /path/to/file ++ """ ++ ++ dsrc_file = f'{expanduser("~")}/.dsrc' ++ config = configparser.ConfigParser() ++ config.read(dsrc_file) ++ ++ # Verify this section does not already exist ++ instances = config.sections() ++ if inst.serverid in instances: ++ raise ValueError("There is already a configuration section for this instance!") ++ ++ # Process and validate the args ++ config[inst.serverid] = {} ++ ++ if args.uri is not None: ++ if not isLDAPUrl(args.uri): ++ raise ValueError("The uri is not a valid LDAP URL!") ++ if args.uri.startswith("ldapi"): ++ # We must use EXTERNAL saslmech for LDAPI ++ args.saslmech = "EXTERNAL" ++ config[inst.serverid]['uri'] = args.uri ++ if args.basedn is not None: ++ if not is_dn(args.basedn): ++ raise ValueError("The basedn is not a valid DN!") ++ config[inst.serverid]['basedn'] = args.basedn ++ if args.binddn is not None: ++ if not is_dn(args.binddn): ++ raise ValueError("The binddn is not a valid DN!") ++ config[inst.serverid]['binddn'] = args.binddn ++ if args.saslmech is not None: ++ if args.saslmech not in ['EXTERNAL', 'PLAIN']: ++ raise ValueError("The saslmech must be EXTERNAL or PLAIN!") ++ config[inst.serverid]['saslmech'] = args.saslmech ++ if args.tls_cacertdir is not None: ++ if not path.exists(args.tls_cacertdir): ++ raise ValueError('--tls-cacertdir directory does not exist!') ++ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir ++ if args.tls_cert is not None: ++ if not path.exists(args.tls_cert): ++ raise ValueError('--tls-cert does not point to an existing file!') ++ config[inst.serverid]['tls_cert'] = args.tls_cert ++ if args.tls_key is not None: ++ if not path.exists(args.tls_key): ++ raise ValueError('--tls-key does not point to an existing file!') ++ config[inst.serverid]['tls_key'] = args.tls_key ++ if args.tls_reqcert is not None: ++ if args.tls_reqcert not in ['never', 'hard', 'allow']: ++ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!') ++ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert ++ if args.starttls: ++ config[inst.serverid]['starttls'] = 'true' ++ if args.pwdfile is not None: ++ if not path.exists(args.pwdfile): ++ raise ValueError('--pwdfile does not exist!') ++ config[inst.serverid]['pwdfile'] = args.pwdfile ++ ++ if len(config[inst.serverid]) == 0: ++ # No args set ++ raise ValueError("You must set at least one argument for the new dsrc file!") ++ ++ # Print a preview of the config ++ log.info(f'Updating "{dsrc_file}" with:\n') ++ log.info(f' [{inst.serverid}]') ++ for k, v in config[inst.serverid].items(): ++ log.info(f' {k} = {v}') ++ ++ # Perform confirmation? ++ if not args.do_it: ++ while 1: ++ val = input(f'\nUpdate "{dsrc_file}" ? 
[yes]: ').rstrip().lower() ++ if val == '' or val == 'y' or val == 'yes': ++ break ++ if val == 'n' or val == 'no': ++ return ++ ++ # Now write the file ++ with open(dsrc_file, 'w') as configfile: ++ config.write(configfile) ++ ++ log.info(f'Successfully updated: {dsrc_file}') ++ ++ ++def modify_dsrc(inst, log, args): ++ """Modify the instance config ++ """ ++ dsrc_file = f'{expanduser("~")}/.dsrc' ++ ++ if path.exists(dsrc_file): ++ config = configparser.ConfigParser() ++ config.read(dsrc_file) ++ ++ # Verify we have a section to modify ++ instances = config.sections() ++ if inst.serverid not in instances: ++ raise ValueError("There is no configuration section for this instance to modify!") ++ ++ # Process and validate the args ++ if args.uri is not None: ++ if not isLDAPUrl(args.uri): ++ raise ValueError("The uri is not a valid LDAP URL!") ++ if args.uri.startswith("ldapi"): ++ # We must use EXTERNAL saslmech for LDAPI ++ args.saslmech = "EXTERNAL" ++ if args.uri == '': ++ del config[inst.serverid]['uri'] ++ else: ++ config[inst.serverid]['uri'] = args.uri ++ if args.basedn is not None: ++ if not is_dn(args.basedn): ++ raise ValueError("The basedn is not a valid DN!") ++ if args.basedn == '': ++ del config[inst.serverid]['basedn'] ++ else: ++ config[inst.serverid]['basedn'] = args.basedn ++ if args.binddn is not None: ++ if not is_dn(args.binddn): ++ raise ValueError("The binddn is not a valid DN!") ++ if args.binddn == '': ++ del config[inst.serverid]['binddn'] ++ else: ++ config[inst.serverid]['binddn'] = args.binddn ++ if args.saslmech is not None: ++ if args.saslmech not in ['EXTERNAL', 'PLAIN']: ++ raise ValueError("The saslmech must be EXTERNAL or PLAIN!") ++ if args.saslmech == '': ++ del config[inst.serverid]['saslmech'] ++ else: ++ config[inst.serverid]['saslmech'] = args.saslmech ++ if args.tls_cacertdir is not None: ++ if not path.exists(args.tls_cacertdir): ++ raise ValueError('--tls-cacertdir directory does not exist!') ++ if args.tls_cacertdir == '': ++ del config[inst.serverid]['tls_cacertdir'] ++ else: ++ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir ++ if args.tls_cert is not None: ++ if not path.exists(args.tls_cert): ++ raise ValueError('--tls-cert does not point to an existing file!') ++ if args.tls_cert == '': ++ del config[inst.serverid]['tls_cert'] ++ else: ++ config[inst.serverid]['tls_cert'] = args.tls_cert ++ if args.tls_key is not None: ++ if not path.exists(args.tls_key): ++ raise ValueError('--tls-key does not point to an existing file!') ++ if args.tls_key == '': ++ del config[inst.serverid]['tls_key'] ++ else: ++ config[inst.serverid]['tls_key'] = args.tls_key ++ if args.tls_reqcert is not None: ++ if args.tls_reqcert not in ['never', 'hard', 'allow']: ++ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!') ++ if args.tls_reqcert == '': ++ del config[inst.serverid]['tls_reqcert'] ++ else: ++ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert ++ if args.starttls: ++ config[inst.serverid]['starttls'] = 'true' ++ if args.cancel_starttls: ++ config[inst.serverid]['starttls'] = 'false' ++ if args.pwdfile is not None: ++ if not path.exists(args.pwdfile): ++ raise ValueError('--pwdfile does not exist!') ++ if args.pwdfile == '': ++ del config[inst.serverid]['pwdfile'] ++ else: ++ config[inst.serverid]['pwdfile'] = args.pwdfile ++ ++ # Okay now rewrite the file ++ with open(dsrc_file, 'w') as configfile: ++ config.write(configfile) ++ ++ log.info(f'Successfully updated: {dsrc_file}') ++ else: ++ raise 
ValueError(f'There is no .dsrc file "{dsrc_file}" to update!') ++ ++ ++def delete_dsrc(inst, log, args): ++ """Delete the .dsrc file ++ """ ++ dsrc_file = f'{expanduser("~")}/.dsrc' ++ if path.exists(dsrc_file): ++ if not args.do_it: ++ # Get confirmation ++ while 1: ++ val = input(f'\nAre you sure you want to remove this instances configuration ? [no]: ').rstrip().lower() ++ if val == 'y' or val == 'yes': ++ break ++ if val == '' or val == 'n' or val == 'no': ++ return ++ ++ config = configparser.ConfigParser() ++ config.read(dsrc_file) ++ instances = config.sections() ++ if inst.serverid not in instances: ++ raise ValueError("The is no configuration for this instance") ++ ++ # Update the config object ++ del config[inst.serverid] ++ ++ if len(config.sections()) == 0: ++ # The file would be empty so just delete it ++ try: ++ remove(dsrc_file) ++ log.info(f'Successfully removed: {dsrc_file}') ++ return ++ except OSError as e: ++ raise ValueError(f'Failed to delete "{dsrc_file}", error: {str(e)}') ++ else: ++ # write the updated config ++ with open(dsrc_file, 'w') as configfile: ++ config.write(configfile) ++ else: ++ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!') ++ ++ log.info(f'Successfully updated: {dsrc_file}') ++ ++def display_dsrc(inst, log, args): ++ """Display the contents of the ~/.dsrc file ++ """ ++ dsrc_file = f'{expanduser("~")}/.dsrc' ++ ++ if not path.exists(dsrc_file): ++ raise ValueError(f'There is no dsrc file "{dsrc_file}" to display!') ++ ++ config = configparser.ConfigParser() ++ config.read(dsrc_file) ++ instances = config.sections() ++ ++ for inst_section in instances: ++ if args.json: ++ log.info(json.dumps({inst_section: dict(config[inst_section])}, indent=4)) ++ else: ++ log.info(f'[{inst_section}]') ++ for k, v in config[inst_section].items(): ++ log.info(f'{k} = {v}') ++ log.info("") ++ ++ ++def create_parser(subparsers): ++ dsrc_parser = subparsers.add_parser('dsrc', help="Manage the .dsrc file") ++ subcommands = dsrc_parser.add_subparsers(help="action") ++ ++ # Create .dsrc file ++ dsrc_create_parser = subcommands.add_parser('create', help='Generate the .dsrc file') ++ dsrc_create_parser.set_defaults(func=create_dsrc) ++ dsrc_create_parser.add_argument('--uri', help="The URI (LDAP URL) for the Directory Server instance.") ++ dsrc_create_parser.add_argument('--basedn', help="The default database suffix.") ++ dsrc_create_parser.add_argument('--binddn', help="The default Bind DN used or authentication.") ++ dsrc_create_parser.add_argument('--saslmech', help="The SASL mechanism to use: PLAIN or EXTERNAL.") ++ dsrc_create_parser.add_argument('--tls-cacertdir', help="The directory containing the Trusted Certificate Authority certificate.") ++ dsrc_create_parser.add_argument('--tls-cert', help="The absolute file name to the server certificate.") ++ dsrc_create_parser.add_argument('--tls-key', help="The absolute file name to the server certificate key.") ++ dsrc_create_parser.add_argument('--tls-reqcert', help="Request certificate strength: 'never', 'allow', 'hard'") ++ dsrc_create_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.") ++ dsrc_create_parser.add_argument('--pwdfile', help="The absolute path to a file containing the Bind DN's password.") ++ dsrc_create_parser.add_argument('--do-it', action='store_true', help="Create the file without any confirmation.") ++ ++ dsrc_modify_parser = subcommands.add_parser('modify', help='Modify the .dsrc file') ++ 
dsrc_modify_parser.set_defaults(func=modify_dsrc) ++ dsrc_modify_parser.add_argument('--uri', nargs='?', const='', help="The URI (LDAP URL) for the Directory Server instance.") ++ dsrc_modify_parser.add_argument('--basedn', nargs='?', const='', help="The default database suffix.") ++ dsrc_modify_parser.add_argument('--binddn', nargs='?', const='', help="The default Bind DN used or authentication.") ++ dsrc_modify_parser.add_argument('--saslmech', nargs='?', const='', help="The SASL mechanism to use: PLAIN or EXTERNAL.") ++ dsrc_modify_parser.add_argument('--tls-cacertdir', nargs='?', const='', help="The directory containing the Trusted Certificate Authority certificate.") ++ dsrc_modify_parser.add_argument('--tls-cert', nargs='?', const='', help="The absolute file name to the server certificate.") ++ dsrc_modify_parser.add_argument('--tls-key', nargs='?', const='', help="The absolute file name to the server certificate key.") ++ dsrc_modify_parser.add_argument('--tls-reqcert', nargs='?', const='', help="Request certificate strength: 'never', 'allow', 'hard'") ++ dsrc_modify_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.") ++ dsrc_modify_parser.add_argument('--cancel-starttls', action='store_true', help="Do not use startTLS for connection to the server.") ++ dsrc_modify_parser.add_argument('--pwdfile', nargs='?', const='', help="The absolute path to a file containing the Bind DN's password.") ++ dsrc_modify_parser.add_argument('--do-it', action='store_true', help="Update the file without any confirmation.") ++ ++ # Delete the instance from the .dsrc file ++ dsrc_delete_parser = subcommands.add_parser('delete', help='Delete instance configuration from the .dsrc file.') ++ dsrc_delete_parser.set_defaults(func=delete_dsrc) ++ dsrc_delete_parser.add_argument('--do-it', action='store_true', ++ help="Delete this instance's configuration from the .dsrc file.") ++ ++ # Display .dsrc file ++ dsrc_display_parser = subcommands.add_parser('display', help='Display the contents of the .dsrc file.') ++ dsrc_display_parser.set_defaults(func=display_dsrc) +-- +2.26.2 + diff --git a/SOURCES/0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch b/SOURCES/0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch deleted file mode 100644 index a85a4bb..0000000 --- a/SOURCES/0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch +++ /dev/null @@ -1,466 +0,0 @@ -From 8d14ff153e9335b09739438344f9c3c78a496548 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 22 May 2020 10:42:11 -0400 -Subject: [PATCH 08/12] Issue 51095 - abort operation if CSN can not be - generated - -Bug Description: If we fail to get the system time then we were using an - uninitialized timespec struct which could lead to bizarre - times in CSN's. - -Fix description: Check if the system time function fails, and if it does - then abort the update operation. - -relates: https://pagure.io/389-ds-base/issue/51095 - -Reviewed by: firstyear & tbordaz(Thanks!!) 
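[Editor's note] The shape of the fix is simple: time acquisition is wrapped in a helper that reports failure, and callers abort the update instead of continuing with an uninitialized timestamp. As an illustrative analogue only (the real change is the C code in the hunks below; the names and constants here are stand-ins, not the slapd API), the same contract can be sketched in Python:

    import time

    CSN_SUCCESS, CSN_TIME_ERROR = 0, -1   # illustrative stand-ins, not the C enum values

    def clock_gettime_checked():
        """Return (rc, seconds); rc != 0 means the clock could not be read."""
        try:
            return CSN_SUCCESS, time.clock_gettime(time.CLOCK_REALTIME)
        except OSError as err:
            return err.errno, 0.0

    def new_csn():
        rc, now = clock_gettime_checked()
        if rc != CSN_SUCCESS:
            # Mirror the fix: refuse to generate a CSN rather than emit a bogus one
            return CSN_TIME_ERROR, None
        return CSN_SUCCESS, f"{int(now):08x}"   # simplified stand-in for a real CSN

In the C patch the same idea lands in slapi_clock_gettime() and csngen_new_csn(), and each ldbm_back_* caller treats the new non-zero return as LDAP_OPERATIONS_ERROR.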
---- - ldap/servers/plugins/replication/repl5.h | 2 +- - .../plugins/replication/repl5_replica.c | 33 ++++++++------ - ldap/servers/slapd/back-ldbm/ldbm_add.c | 8 +++- - ldap/servers/slapd/back-ldbm/ldbm_delete.c | 9 +++- - ldap/servers/slapd/back-ldbm/ldbm_modify.c | 10 ++++- - ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 8 +++- - ldap/servers/slapd/csngen.c | 18 +++++++- - ldap/servers/slapd/entrywsi.c | 15 ++++--- - ldap/servers/slapd/slap.h | 2 +- - ldap/servers/slapd/slapi-plugin.h | 8 ++++ - ldap/servers/slapd/slapi-private.h | 5 ++- - ldap/servers/slapd/time.c | 43 +++++++++++++------ - 12 files changed, 118 insertions(+), 43 deletions(-) - -diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h -index 72b7089e3..638471744 100644 ---- a/ldap/servers/plugins/replication/repl5.h -+++ b/ldap/servers/plugins/replication/repl5.h -@@ -776,7 +776,7 @@ void replica_disable_replication(Replica *r); - int replica_start_agreement(Replica *r, Repl_Agmt *ra); - int windows_replica_start_agreement(Replica *r, Repl_Agmt *ra); - --CSN *replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn); -+int32_t replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn); - int replica_get_attr(Slapi_PBlock *pb, const char *type, void *value); - - /* mapping tree extensions manipulation */ -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index 02caa88d9..f01782330 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -3931,11 +3931,9 @@ windows_replica_start_agreement(Replica *r, Repl_Agmt *ra) - * A callback function registered as op->o_csngen_handler and - * called by backend ops to generate opcsn. 
- */ --CSN * --replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn) -+int32_t -+replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn) - { -- CSN *opcsn = NULL; -- - Replica *replica = replica_get_replica_for_op(pb); - if (NULL != replica) { - Slapi_Operation *op; -@@ -3946,17 +3944,26 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn) - CSNGen *gen = (CSNGen *)object_get_data(gen_obj); - if (NULL != gen) { - /* The new CSN should be greater than the base CSN */ -- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */); -- if (csn_compare(opcsn, basecsn) <= 0) { -- char opcsnstr[CSN_STRSIZE], basecsnstr[CSN_STRSIZE]; -+ if (csngen_new_csn(gen, opcsn, PR_FALSE /* don't notify */) != CSN_SUCCESS) { -+ /* Failed to generate CSN we must abort */ -+ object_release(gen_obj); -+ return -1; -+ } -+ if (csn_compare(*opcsn, basecsn) <= 0) { -+ char opcsnstr[CSN_STRSIZE]; -+ char basecsnstr[CSN_STRSIZE]; - char opcsn2str[CSN_STRSIZE]; - -- csn_as_string(opcsn, PR_FALSE, opcsnstr); -+ csn_as_string(*opcsn, PR_FALSE, opcsnstr); - csn_as_string(basecsn, PR_FALSE, basecsnstr); -- csn_free(&opcsn); -+ csn_free(opcsn); - csngen_adjust_time(gen, basecsn); -- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */); -- csn_as_string(opcsn, PR_FALSE, opcsn2str); -+ if (csngen_new_csn(gen, opcsn, PR_FALSE) != CSN_SUCCESS) { -+ /* Failed to generate CSN we must abort */ -+ object_release(gen_obj); -+ return -1; -+ } -+ csn_as_string(*opcsn, PR_FALSE, opcsn2str); - slapi_log_err(SLAPI_LOG_WARNING, repl_plugin_name, - "replica_generate_next_csn - " - "opcsn=%s <= basecsn=%s, adjusted opcsn=%s\n", -@@ -3966,14 +3973,14 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn) - * Insert opcsn into the csn pending list. - * This is the notify effect in csngen_new_csn(). - */ -- assign_csn_callback(opcsn, (void *)replica); -+ assign_csn_callback(*opcsn, (void *)replica); - } - object_release(gen_obj); - } - } - } - -- return opcsn; -+ return 0; - } - - /* -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c -index d0d88bf16..ee366c74c 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_add.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c -@@ -645,7 +645,13 @@ ldbm_back_add(Slapi_PBlock *pb) - * Current op is a user request. Opcsn will be assigned - * if the dn is in an updatable replica. - */ -- opcsn = entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL); -+ if (entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_add", -+ "failed to generate add CSN for entry (%s), aborting operation\n", -+ slapi_entry_get_dn(e)); -+ ldap_result_code = LDAP_OPERATIONS_ERROR; -+ goto error_return; -+ } - } - if (opcsn != NULL) { - entry_set_csn(e, opcsn); -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c -index 873b5b00e..fbcb57310 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c -@@ -464,7 +464,14 @@ replace_entry: - * by entry_assign_operation_csn() if the dn is in an - * updatable replica. 
- */ -- opcsn = entry_assign_operation_csn ( pb, e->ep_entry, NULL ); -+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_delete", -+ "failed to generate delete CSN for entry (%s), aborting operation\n", -+ slapi_entry_get_dn(e->ep_entry)); -+ retval = -1; -+ ldap_result_code = LDAP_OPERATIONS_ERROR; -+ goto error_return; -+ } - } - if (opcsn != NULL) { - if (!is_fixup_operation) { -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -index b0c477e3f..e9d7e87e3 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -@@ -598,12 +598,18 @@ ldbm_back_modify(Slapi_PBlock *pb) - goto error_return; - } - opcsn = operation_get_csn(operation); -- if (NULL == opcsn && operation->o_csngen_handler) { -+ if (opcsn == NULL && operation->o_csngen_handler) { - /* - * Current op is a user request. Opcsn will be assigned - * if the dn is in an updatable replica. - */ -- opcsn = entry_assign_operation_csn(pb, e->ep_entry, NULL); -+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify", -+ "failed to generate modify CSN for entry (%s), aborting operation\n", -+ slapi_entry_get_dn(e->ep_entry)); -+ ldap_result_code = LDAP_OPERATIONS_ERROR; -+ goto error_return; -+ } - } - if (opcsn) { - entry_set_maxcsn(e->ep_entry, opcsn); -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -index 26698012a..fde83c99f 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -@@ -543,7 +543,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb) - * Current op is a user request. Opcsn will be assigned - * if the dn is in an updatable replica. - */ -- opcsn = entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL); -+ if (entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modrdn", -+ "failed to generate modrdn CSN for entry (%s), aborting operation\n", -+ slapi_entry_get_dn(e->ep_entry)); -+ ldap_result_code = LDAP_OPERATIONS_ERROR; -+ goto error_return; -+ } - } - if (opcsn != NULL) { - entry_set_maxcsn(e->ep_entry, opcsn); -diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c -index 68dbbda8e..b08d8b25c 100644 ---- a/ldap/servers/slapd/csngen.c -+++ b/ldap/servers/slapd/csngen.c -@@ -164,6 +164,7 @@ csngen_free(CSNGen **gen) - int - csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify) - { -+ struct timespec now = {0}; - int rc = CSN_SUCCESS; - time_t cur_time; - int delta; -@@ -179,12 +180,25 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify) - return CSN_MEMORY_ERROR; - } - -- slapi_rwlock_wrlock(gen->lock); -+ if ((rc = slapi_clock_gettime(&now)) != 0) { -+ /* Failed to get system time, we must abort */ -+ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", -+ "Failed to get system time (%s)\n", -+ slapd_system_strerror(rc)); -+ return CSN_TIME_ERROR; -+ } -+ cur_time = now.tv_sec; - -- cur_time = slapi_current_utc_time(); -+ slapi_rwlock_wrlock(gen->lock); - - /* check if the time should be adjusted */ - delta = cur_time - gen->state.sampled_time; -+ if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) { -+ /* We had a jump larger than a day */ -+ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", -+ "Detected large jump in CSN time. 
Delta: %d (current time: %ld vs previous time: %ld)\n", -+ delta, cur_time, gen->state.sampled_time); -+ } - if (delta > 0) { - rc = _csngen_adjust_local_time(gen, cur_time); - if (rc != CSN_SUCCESS) { -diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c -index 5d1d7238a..31bf65d8e 100644 ---- a/ldap/servers/slapd/entrywsi.c -+++ b/ldap/servers/slapd/entrywsi.c -@@ -224,13 +224,12 @@ entry_add_rdn_csn(Slapi_Entry *e, const CSN *csn) - slapi_rdn_free(&rdn); - } - --CSN * --entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry) -+int32_t -+entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn) - { - Slapi_Operation *op; - const CSN *basecsn = NULL; - const CSN *parententry_dncsn = NULL; -- CSN *opcsn = NULL; - - slapi_pblock_get(pb, SLAPI_OPERATION, &op); - -@@ -252,14 +251,16 @@ entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parent - basecsn = parententry_dncsn; - } - } -- opcsn = op->o_csngen_handler(pb, basecsn); -+ if(op->o_csngen_handler(pb, basecsn, opcsn) != 0) { -+ return -1; -+ } - -- if (NULL != opcsn) { -- operation_set_csn(op, opcsn); -+ if (*opcsn) { -+ operation_set_csn(op, *opcsn); - } - } - -- return opcsn; -+ return 0; - } - - /* -diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h -index a4cae784a..cef8c789c 100644 ---- a/ldap/servers/slapd/slap.h -+++ b/ldap/servers/slapd/slap.h -@@ -1480,7 +1480,7 @@ struct op; - typedef void (*result_handler)(struct conn *, struct op *, int, char *, char *, int, struct berval **); - typedef int (*search_entry_handler)(Slapi_Backend *, struct conn *, struct op *, struct slapi_entry *); - typedef int (*search_referral_handler)(Slapi_Backend *, struct conn *, struct op *, struct berval **); --typedef CSN *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn); -+typedef int32_t *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn); - typedef int (*replica_attr_handler)(Slapi_PBlock *pb, const char *type, void **value); - - /* -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index be1e52e4d..834a98742 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -6743,6 +6743,14 @@ int slapi_reslimit_get_integer_limit(Slapi_Connection *conn, int handle, int *li - */ - time_t slapi_current_time(void) __attribute__((deprecated)); - -+/** -+ * Get the system time and check for errors. 
Return -+ * -+ * \param tp - a timespec struct where the system time is set -+ * \return result code, upon success tp is set to the system time -+ */ -+int32_t slapi_clock_gettime(struct timespec *tp); -+ - /** - * Returns the current system time as a hr clock relative to uptime - * This means the clock is not affected by timezones -diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h -index d85ee43e5..c98c1947c 100644 ---- a/ldap/servers/slapd/slapi-private.h -+++ b/ldap/servers/slapd/slapi-private.h -@@ -233,7 +233,8 @@ enum - CSN_INVALID_PARAMETER, /* invalid function argument */ - CSN_INVALID_FORMAT, /* invalid state format */ - CSN_LDAP_ERROR, /* LDAP operation failed */ -- CSN_NSPR_ERROR /* NSPR API failure */ -+ CSN_NSPR_ERROR, /* NSPR API failure */ -+ CSN_TIME_ERROR /* Error generating new CSN due to clock failure */ - }; - - typedef struct csngen CSNGen; -@@ -326,7 +327,7 @@ int slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **new_entries, int - void set_attr_to_protected_list(char *attr, int flag); - - /* entrywsi.c */ --CSN *entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry); -+int32_t entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn); - const CSN *entry_get_maxcsn(const Slapi_Entry *entry); - void entry_set_maxcsn(Slapi_Entry *entry, const CSN *csn); - const CSN *entry_get_dncsn(const Slapi_Entry *entry); -diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c -index 8048a3359..545538404 100644 ---- a/ldap/servers/slapd/time.c -+++ b/ldap/servers/slapd/time.c -@@ -61,6 +61,25 @@ poll_current_time() - return 0; - } - -+/* -+ * Check if the time function returns an error. If so return the errno -+ */ -+int32_t -+slapi_clock_gettime(struct timespec *tp) -+{ -+ int32_t rc = 0; -+ -+ PR_ASSERT(tp && tp->tv_nsec == 0 && tp->tv_sec == 0); -+ -+ if (clock_gettime(CLOCK_REALTIME, tp) != 0) { -+ rc = errno; -+ } -+ -+ PR_ASSERT(rc == 0); -+ -+ return rc; -+} -+ - time_t - current_time(void) - { -@@ -69,7 +88,7 @@ current_time(void) - * but this should be removed in favour of the - * more accurately named slapi_current_utc_time - */ -- struct timespec now; -+ struct timespec now = {0}; - clock_gettime(CLOCK_REALTIME, &now); - return now.tv_sec; - } -@@ -83,7 +102,7 @@ slapi_current_time(void) - struct timespec - slapi_current_rel_time_hr(void) - { -- struct timespec now; -+ struct timespec now = {0}; - clock_gettime(CLOCK_MONOTONIC, &now); - return now; - } -@@ -91,7 +110,7 @@ slapi_current_rel_time_hr(void) - struct timespec - slapi_current_utc_time_hr(void) - { -- struct timespec ltnow; -+ struct timespec ltnow = {0}; - clock_gettime(CLOCK_REALTIME, <now); - return ltnow; - } -@@ -99,7 +118,7 @@ slapi_current_utc_time_hr(void) - time_t - slapi_current_utc_time(void) - { -- struct timespec ltnow; -+ struct timespec ltnow = {0}; - clock_gettime(CLOCK_REALTIME, <now); - return ltnow.tv_sec; - } -@@ -108,8 +127,8 @@ void - slapi_timestamp_utc_hr(char *buf, size_t bufsize) - { - PR_ASSERT(bufsize >= SLAPI_TIMESTAMP_BUFSIZE); -- struct timespec ltnow; -- struct tm utctm; -+ struct timespec ltnow = {0}; -+ struct tm utctm = {0}; - clock_gettime(CLOCK_REALTIME, <now); - gmtime_r(&(ltnow.tv_sec), &utctm); - strftime(buf, bufsize, "%Y%m%d%H%M%SZ", &utctm); -@@ -140,7 +159,7 @@ format_localTime_log(time_t t, int initsize __attribute__((unused)), char *buf, - { - - long tz; -- struct tm *tmsp, tms; -+ struct tm *tmsp, tms = {0}; - char tbuf[*bufsize]; - char 
sign; - /* make sure our buffer will be big enough. Need at least 29 */ -@@ -191,7 +210,7 @@ format_localTime_hr_log(time_t t, long nsec, int initsize __attribute__((unused) - { - - long tz; -- struct tm *tmsp, tms; -+ struct tm *tmsp, tms = {0}; - char tbuf[*bufsize]; - char sign; - /* make sure our buffer will be big enough. Need at least 39 */ -@@ -278,7 +297,7 @@ slapi_timespec_expire_check(struct timespec *expire) - if (expire->tv_sec == 0 && expire->tv_nsec == 0) { - return TIMER_CONTINUE; - } -- struct timespec now; -+ struct timespec now = {0}; - clock_gettime(CLOCK_MONOTONIC, &now); - if (now.tv_sec > expire->tv_sec || - (expire->tv_sec == now.tv_sec && now.tv_sec > expire->tv_nsec)) { -@@ -293,7 +312,7 @@ format_localTime(time_t from) - in the syntax of a generalizedTime, except without the time zone. */ - { - char *into; -- struct tm t; -+ struct tm t = {0}; - - localtime_r(&from, &t); - -@@ -362,7 +381,7 @@ format_genTime(time_t from) - in the syntax of a generalizedTime. */ - { - char *into; -- struct tm t; -+ struct tm t = {0}; - - gmtime_r(&from, &t); - into = slapi_ch_malloc(SLAPI_TIMESTAMP_BUFSIZE); -@@ -382,7 +401,7 @@ time_t - read_genTime(struct berval *from) - { - struct tm t = {0}; -- time_t retTime; -+ time_t retTime = {0}; - time_t diffsec = 0; - int i, gflag = 0, havesec = 0; - --- -2.26.2 - diff --git a/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch b/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch new file mode 100644 index 0000000..1a0df22 --- /dev/null +++ b/SOURCES/0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch @@ -0,0 +1,902 @@ +From 201cb1147c0a34bddbd3e5c03aecd804c47a9905 Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Thu, 19 Nov 2020 10:21:10 +0100 +Subject: [PATCH 2/2] Issue 4440 - BUG - ldifgen with --start-idx option fails + with unsupported operand (#4444) + +Bug description: +Got TypeError exception when usign: + dsctl -v slapd-localhost ldifgen users --suffix + dc=example,dc=com --parent ou=people,dc=example,dc=com + --number 100000 --generic --start-idx=50 +The reason is that by default python parser provides + value for numeric options: + as an integer if specified by "--option value" or + as a string if specified by "--option=value" + +Fix description: +convert the numeric parameters to integer when using it. + options impacted are: + - in users subcommand: --number , --start-idx + - in mod-load subcommand: --num-users, --add-users, + --del-users, --modrdn-users, --mod-users + +FYI: An alternative solution would have been to indicate the +parser that these values are an integer. But two reasons + leaded me to implement the first solution: + - first solution fix the problem for all users while the + second one fixes only dsctl command. + - first solution is easier to test: + I just added a new test file generated by a script + that duplicated existing ldifgen test, renamed the + test cases and replaced the numeric arguments by + strings. + Second solution would need to redesign the test framework + to be able to test the parser. 
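[Editor's note] In other words, the generator just has to tolerate receiving its counters as strings. A minimal sketch of that defensive pattern (hypothetical function name, not the actual lib389 dbgen code) is:

    def build_user_rdns(number, start_idx):
        """Accept the counters as int or str, whichever the CLI parser hands over."""
        number, start_idx = int(number), int(start_idx)   # coerce before arithmetic, as the dbgen.py change does
        return [f"uid=user{idx:08}" for idx in range(start_idx, start_idx + number)]

    # Both spellings of the options now behave the same:
    assert build_user_rdns(3, 50) == build_user_rdns("3", "50")

With the coercion in place, "--start-idx 50" and "--start-idx=50" produce the same entries instead of the TypeError reported above.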
+ +relates: https://github.com/389ds/389-ds-base/issues/4440 + +Reviewed by: + +Platforms tested: F32 + +(cherry picked from commit 3c3e1f30cdb046a1aabb93aacebcf261a76a0892) +--- + .../tests/suites/clu/dbgen_test_usan.py | 806 ++++++++++++++++++ + src/lib389/lib389/cli_ctl/dbgen.py | 10 +- + src/lib389/lib389/dbgen.py | 3 + + 3 files changed, 814 insertions(+), 5 deletions(-) + create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py + +diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py +new file mode 100644 +index 000000000..80ff63417 +--- /dev/null ++++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py +@@ -0,0 +1,806 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import time ++ ++""" ++ This file contains tests similar to dbgen_test.py ++ except that paramaters that are number are expressed as string ++ (to mimic the parameters parser default behavior which returns an ++ int when parsing "option value" and a string when parsing "option=value" ++ This file has been generated by usign: ++sed ' ++9r z1 ++s/ test_/ test_usan/ ++/args.*= [0-9]/s,[0-9]*$,"&", ++/:id:/s/.$/1/ ++' dbgen_test.py > dbgen_test_usan.py ++ ( with z1 file containing this comment ) ++""" ++ ++ ++ ++import subprocess ++import pytest ++ ++from lib389.cli_ctl.dbgen import * ++from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates ++from lib389.idm.account import Accounts ++from lib389.idm.group import Groups ++from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st ++from lib389.cli_base import FakeArgs ++ ++pytestmark = pytest.mark.tier0 ++ ++LOG_FILE = '/tmp/dbgen.log' ++logging.getLogger(__name__).setLevel(logging.DEBUG) ++log = logging.getLogger(__name__) ++ ++ ++@pytest.fixture(scope="function") ++def set_log_file_and_ldif(topology_st, request): ++ global ldif_file ++ ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif' ++ ++ fh = logging.FileHandler(LOG_FILE) ++ fh.setLevel(logging.DEBUG) ++ log.addHandler(fh) ++ ++ def fin(): ++ log.info('Delete files') ++ os.remove(LOG_FILE) ++ os.remove(ldif_file) ++ ++ request.addfinalizer(fin) ++ ++ ++def run_offline_import(instance, ldif_file): ++ log.info('Stopping the server and running offline import...') ++ instance.stop() ++ assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, ++ import_file=ldif_file) ++ instance.start() ++ ++ ++def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None): ++ LDAP_MOD = '/usr/bin/ldapmodify' ++ log.info('Add entries from ldif file with ldapmodify') ++ result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, ++ '-h', instance.host, '-p', str(instance.port), '-af', ldif_file]) ++ if output_to_check is not None: ++ assert output_to_check in ensure_str(result) ++ ++ ++def check_value_in_log_and_reset(content_list): ++ with open(LOG_FILE, 'r+') as f: ++ file_content = f.read() ++ log.info('Check if content is present in output') ++ for item in content_list: ++ assert item in file_content ++ ++ log.info('Reset log file for next test') ++ f.truncate(0) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 
++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create ldif with users ++ ++ :id: 426b5b94-9923-454d-a736-7e71ca985e91 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with users ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ """ ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.suffix = DEFAULT_SUFFIX ++ args.parent = 'ou=people,dc=example,dc=com' ++ args.number = "1000" ++ args.rdn_cn = False ++ args.generic = True ++ args.start_idx = "50" ++ args.localize = False ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'suffix={}'.format(args.suffix), ++ 'parent={}'.format(args.parent), ++ 'number={}'.format(args.number), ++ 'rdn-cn={}'.format(args.rdn_cn), ++ 'generic={}'.format(args.generic), ++ 'start-idx={}'.format(args.start_idx), ++ 'localize={}'.format(args.localize), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create users ldif') ++ dbgen_create_users(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ log.info('Get number of accounts before import') ++ accounts = Accounts(standalone, DEFAULT_SUFFIX) ++ count_account = len(accounts.filter('(uid=*)')) ++ ++ run_offline_import(standalone, ldif_file) ++ ++ log.info('Check that accounts are imported') ++ assert len(accounts.filter('(uid=*)')) > count_account ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create ldif with group ++ ++ :id: 97207413-9a93-4065-a5ec-63aa93801a31 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with group ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.NAME = 'myGroup' ++ args.parent = 'ou=groups,dc=example,dc=com' ++ args.suffix = DEFAULT_SUFFIX ++ args.number = "1" ++ args.num_members = "1000" ++ args.create_members = True ++ args.member_attr = 'uniquemember' ++ args.member_parent = 'ou=people,dc=example,dc=com' ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'number={}'.format(args.number), ++ 'suffix={}'.format(args.suffix), ++ 'num-members={}'.format(args.num_members), ++ 'create-members={}'.format(args.create_members), ++ 'member-parent={}'.format(args.member_parent), ++ 'member-attr={}'.format(args.member_attr), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create group ldif') ++ dbgen_create_groups(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ log.info('Get number of accounts before import') ++ accounts = Accounts(standalone, DEFAULT_SUFFIX) ++ count_account = len(accounts.filter('(uid=*)')) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0 ++ with pytest.raises(subprocess.CalledProcessError): ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that accounts are imported') ++ assert len(accounts.filter('(uid=*)')) > count_account ++ ++ log.info('Check that group is imported') ++ groups = Groups(standalone, DEFAULT_SUFFIX) ++ assert groups.exists(args.NAME + '-1') ++ new_group = groups.get(args.NAME + '-1') ++ new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com') ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create a COS definition ++ ++ :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with classic COS definition ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.type = 'classic' ++ args.NAME = 'My_Postal_Def' ++ args.parent = 'ou=cos definitions,dc=example,dc=com' ++ args.create_parent = True ++ args.cos_specifier = 'businessCategory' ++ args.cos_attr = ['postalcode', 'telephonenumber'] ++ args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com' ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'type={}'.format(args.type), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'cos-specifier={}'.format(args.cos_specifier), ++ 'cos-template={}'.format(args.cos_template), ++ 'cos-attr={}'.format(args.cos_attr), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create COS definition ldif') ++ dbgen_create_cos_def(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that COS definition is imported') ++ cos_def = CosClassicDefinitions(standalone, args.parent) ++ assert cos_def.exists(args.NAME) ++ new_cos = cos_def.get(args.NAME) ++ assert new_cos.present('cosTemplateDN', args.cos_template) ++ assert new_cos.present('cosSpecifier', args.cos_specifier) ++ assert new_cos.present('cosAttribute', args.cos_attr[0]) ++ assert new_cos.present('cosAttribute', args.cos_attr[1]) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create a COS definition ++ ++ :id: 6b26ca6d-226a-4f93-925e-faf95cc20211 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with pointer COS definition ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.type = 'pointer' ++ args.NAME = 'My_Postal_Def_pointer' ++ args.parent = 'ou=cos pointer definitions,dc=example,dc=com' ++ args.create_parent = True ++ args.cos_specifier = None ++ args.cos_attr = ['postalcode', 'telephonenumber'] ++ args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com' ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'type={}'.format(args.type), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'cos-template={}'.format(args.cos_template), ++ 'cos-attr={}'.format(args.cos_attr), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create COS definition ldif') ++ dbgen_create_cos_def(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that COS definition is imported') ++ cos_def = CosPointerDefinitions(standalone, args.parent) ++ assert cos_def.exists(args.NAME) ++ new_cos = cos_def.get(args.NAME) ++ assert new_cos.present('cosTemplateDN', args.cos_template) ++ assert new_cos.present('cosAttribute', args.cos_attr[0]) ++ assert new_cos.present('cosAttribute', args.cos_attr[1]) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create a COS definition ++ ++ :id: ab4b799e-e801-432a-a61d-badad2628201 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with indirect COS definition ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.type = 'indirect' ++ args.NAME = 'My_Postal_Def_indirect' ++ args.parent = 'ou=cos indirect definitions,dc=example,dc=com' ++ args.create_parent = True ++ args.cos_specifier = 'businessCategory' ++ args.cos_attr = ['postalcode', 'telephonenumber'] ++ args.cos_template = None ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'type={}'.format(args.type), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'cos-specifier={}'.format(args.cos_specifier), ++ 'cos-attr={}'.format(args.cos_attr), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create COS definition ldif') ++ dbgen_create_cos_def(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that COS definition is imported') ++ cos_def = CosIndirectDefinitions(standalone, args.parent) ++ assert cos_def.exists(args.NAME) ++ new_cos = cos_def.get(args.NAME) ++ assert new_cos.present('cosIndirectSpecifier', args.cos_specifier) ++ assert new_cos.present('cosAttribute', args.cos_attr[0]) ++ assert new_cos.present('cosAttribute', args.cos_attr[1]) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create a COS template ++ ++ :id: 544017c7-4a82-4e7d-a047-00b68a28e071 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with COS template ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.NAME = 'My_Template' ++ args.parent = 'ou=cos templates,dc=example,dc=com' ++ args.create_parent = True ++ args.cos_priority = "1" ++ args.cos_attr_val = 'postalcode:12345' ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'cos-priority={}'.format(args.cos_priority), ++ 'cos-attr-val={}'.format(args.cos_attr_val), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create COS template ldif') ++ dbgen_create_cos_tmp(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that COS template is imported') ++ cos_temp = CosTemplates(standalone, args.parent) ++ assert cos_temp.exists(args.NAME) ++ new_cos = cos_temp.get(args.NAME) ++ assert new_cos.present('cosPriority', str(args.cos_priority)) ++ assert new_cos.present('postalcode', '12345') ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create a managed role ++ ++ :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with managed role ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ ++ args.NAME = 'My_Managed_Role' ++ args.parent = 'ou=managed roles,dc=example,dc=com' ++ args.create_parent = True ++ args.type = 'managed' ++ args.filter = None ++ args.role_dn = None ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'type={}'.format(args.type), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create managed role ldif') ++ dbgen_create_role(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that managed role is imported') ++ roles = ManagedRoles(standalone, DEFAULT_SUFFIX) ++ assert roles.exists(args.NAME) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create a filtered role ++ ++ :id: cb3c8ea8-4234-40e2-8810-fb6a25973921 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with filtered role ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ ++ args.NAME = 'My_Filtered_Role' ++ args.parent = 'ou=filtered roles,dc=example,dc=com' ++ args.create_parent = True ++ args.type = 'filtered' ++ args.filter = '"objectclass=posixAccount"' ++ args.role_dn = None ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'type={}'.format(args.type), ++ 'filter={}'.format(args.filter), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create filtered role ldif') ++ dbgen_create_role(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that filtered role is imported') ++ roles = FilteredRoles(standalone, DEFAULT_SUFFIX) ++ assert roles.exists(args.NAME) ++ new_role = roles.get(args.NAME) ++ assert new_role.present('nsRoleFilter', args.filter) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create a nested role ++ ++ :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate ldif with nested role ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"' ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.NAME = 'My_Nested_Role' ++ args.parent = 'ou=nested roles,dc=example,dc=com' ++ args.create_parent = True ++ args.type = 'nested' ++ args.filter = None ++ args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com'] ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'NAME={}'.format(args.NAME), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'type={}'.format(args.type), ++ 'role-dn={}'.format(args.role_dn), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create nested role ldif') ++ dbgen_create_role(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) ++ ++ log.info('Check that nested role is imported') ++ roles = NestedRoles(standalone, DEFAULT_SUFFIX) ++ assert roles.exists(args.NAME) ++ new_role = roles.get(args.NAME) ++ assert new_role.present('nsRoleDN', args.role_dn[0]) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create mixed modification ldif ++ ++ :id: 4a2e0901-2b48-452e-a4a0-507735132c81 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate modification ldif ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.parent = DEFAULT_SUFFIX ++ args.create_users = True ++ args.delete_users = True ++ args.create_parent = False ++ args.num_users = "1000" ++ args.add_users = "100" ++ args.del_users = "999" ++ args.modrdn_users = "100" ++ args.mod_users = "10" ++ args.mod_attrs = ['cn', 'uid', 'sn'] ++ args.randomize = False ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'create-users={}'.format(args.create_users), ++ 'parent={}'.format(args.parent), ++ 'create-parent={}'.format(args.create_parent), ++ 'delete-users={}'.format(args.delete_users), ++ 'num-users={}'.format(args.num_users), ++ 'add-users={}'.format(args.add_users), ++ 'del-users={}'.format(args.del_users), ++ 'modrdn-users={}'.format(args.modrdn_users), ++ 'mod-users={}'.format(args.mod_users), ++ 'mod-attrs={}'.format(args.mod_attrs), ++ 'randomize={}'.format(args.randomize), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created LDIF file: {}'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create modification ldif') ++ dbgen_create_mods(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ log.info('Get number of accounts before import') ++ accounts = Accounts(standalone, DEFAULT_SUFFIX) ++ count_account = len(accounts.filter('(uid=*)')) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0 ++ with pytest.raises(subprocess.CalledProcessError): ++ run_ldapmodify_from_file(standalone, ldif_file) ++ ++ log.info('Check that some accounts are imported') ++ assert len(accounts.filter('(uid=*)')) > count_account ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1798394 ++@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") ++def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif): ++ """Test ldifgen (formerly dbgen) tool to create nested ldif ++ ++ :id: 9c281c28-4169-45e0-8c07-c5502d9a7581 ++ :setup: Standalone instance ++ :steps: ++ 1. Create DS instance ++ 2. Run ldifgen to generate nested ldif ++ 3. Import generated ldif to database ++ 4. Check it was properly imported ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ standalone = topology_st.standalone ++ ++ args = FakeArgs() ++ args.suffix = DEFAULT_SUFFIX ++ args.node_limit = "100" ++ args.num_users = "600" ++ args.ldif_file = ldif_file ++ ++ content_list = ['Generating LDIF with the following options:', ++ 'suffix={}'.format(args.suffix), ++ 'node-limit={}'.format(args.node_limit), ++ 'num-users={}'.format(args.num_users), ++ 'ldif-file={}'.format(args.ldif_file), ++ 'Writing LDIF', ++ 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)] ++ ++ log.info('Run ldifgen to create nested ldif') ++ dbgen_create_nested(standalone, log, args) ++ ++ log.info('Check if file exists') ++ assert os.path.exists(ldif_file) ++ ++ check_value_in_log_and_reset(content_list) ++ ++ log.info('Get number of accounts before import') ++ accounts = Accounts(standalone, DEFAULT_SUFFIX) ++ count_account = len(accounts.filter('(uid=*)')) ++ count_ou = len(accounts.filter('(ou=*)')) ++ ++ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db ++ # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0 ++ with pytest.raises(subprocess.CalledProcessError): ++ run_ldapmodify_from_file(standalone, ldif_file) ++ ++ standalone.restart() ++ ++ log.info('Check that accounts are imported') ++ assert len(accounts.filter('(uid=*)')) > count_account ++ assert len(accounts.filter('(ou=*)')) > count_ou ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/src/lib389/lib389/cli_ctl/dbgen.py b/src/lib389/lib389/cli_ctl/dbgen.py +index 7bc3892ba..058342fb1 100644 +--- a/src/lib389/lib389/cli_ctl/dbgen.py ++++ b/src/lib389/lib389/cli_ctl/dbgen.py +@@ -451,13 +451,13 @@ def dbgen_create_mods(inst, log, args): + props = { + "createUsers": args.create_users, + "deleteUsers": args.delete_users, +- "numUsers": args.num_users, ++ "numUsers": int(args.num_users), + "parent": args.parent, + "createParent": args.create_parent, +- "addUsers": args.add_users, +- "delUsers": args.del_users, +- "modrdnUsers": args.modrdn_users, +- "modUsers": args.mod_users, ++ "addUsers": int(args.add_users), ++ "delUsers": int(args.del_users), ++ "modrdnUsers": int(args.modrdn_users), ++ "modUsers": int(args.mod_users), + "random": args.randomize, + "modAttrs": args.mod_attrs + } +diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py +index 6273781a2..10fb200f7 100644 +--- a/src/lib389/lib389/dbgen.py ++++ b/src/lib389/lib389/dbgen.py +@@ -220,6 +220,9 @@ def dbgen_users(instance, number, ldif_file, suffix, generic=False, entry_name=" + """ + Generate an LDIF of randomly named entries + """ ++ # Lets insure that integer parameters are not string ++ number=int(number) ++ startIdx=int(startIdx) + familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames') + givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames') + familynames = [] +-- +2.26.2 + diff --git a/SOURCES/0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch b/SOURCES/0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch deleted file mode 100644 index e5dbb3d..0000000 --- a/SOURCES/0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch +++ /dev/null @@ -1,179 +0,0 @@ -From 52ce524f7672563b543e84401665765cfa72dea5 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 26 May 2020 17:03:11 
-0400 -Subject: [PATCH 09/12] Issue 51113 - Allow using uid for replication manager - entry - -Bug Description: Currently it was hardcoded to only allow "cn" as - the rdn attribute for the replication manager entry. - -Fix description: Allow setting the rdn attribute of the replication - manager DS ldap object, and include the schema that - allows "uid". - -relates: https://pagure.io/389-ds-base/issue/51113 - -Reviewed by: spichugi & firstyear(Thanks!!) ---- - src/lib389/lib389/cli_conf/replication.py | 53 ++++++++++++----------- - src/lib389/lib389/replica.py | 11 +++-- - 2 files changed, 35 insertions(+), 29 deletions(-) - -diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py -index 09cb9b435..b9bc3d291 100644 ---- a/src/lib389/lib389/cli_conf/replication.py -+++ b/src/lib389/lib389/cli_conf/replication.py -@@ -199,19 +199,21 @@ def enable_replication(inst, basedn, log, args): - - # Create replication manager if password was provided - if args.bind_dn and args.bind_passwd: -- cn_rdn = args.bind_dn.split(",", 1)[0] -- cn_val = cn_rdn.split("=", 1)[1] -- manager = BootstrapReplicationManager(inst, dn=args.bind_dn) -+ rdn = args.bind_dn.split(",", 1)[0] -+ rdn_attr, rdn_val = rdn.split("=", 1) -+ manager = BootstrapReplicationManager(inst, dn=args.bind_dn, rdn_attr=rdn_attr) - try: - manager.create(properties={ -- 'cn': cn_val, -+ 'cn': rdn_val, -+ 'uid': rdn_val, - 'userPassword': args.bind_passwd - }) - except ldap.ALREADY_EXISTS: - # Already there, but could have different password. Delete and recreate - manager.delete() - manager.create(properties={ -- 'cn': cn_val, -+ 'cn': rdn_val, -+ 'uid': rdn_val, - 'userPassword': args.bind_passwd - }) - except ldap.NO_SUCH_OBJECT: -@@ -511,22 +513,23 @@ def get_cl(inst, basedn, log, args): - - - def create_repl_manager(inst, basedn, log, args): -- manager_cn = "replication manager" -+ manager_name = "replication manager" - repl_manager_password = "" - repl_manager_password_confirm = "" - - if args.name: -- manager_cn = args.name -- -- if is_a_dn(manager_cn): -- # A full DN was provided, make sure it uses "cn" for the RDN -- if manager_cn.split("=", 1)[0].lower() != "cn": -- raise ValueError("Replication manager DN must use \"cn\" for the rdn attribute") -- manager_dn = manager_cn -- manager_rdn = manager_dn.split(",", 1)[0] -- manager_cn = manager_rdn.split("=", 1)[1] -+ manager_name = args.name -+ -+ if is_a_dn(manager_name): -+ # A full DN was provided -+ manager_dn = manager_name -+ manager_rdn = manager_name.split(",", 1)[0] -+ manager_attr, manager_name = manager_rdn.split("=", 1) -+ if manager_attr.lower() not in ['cn', 'uid']: -+ raise ValueError(f'The RDN attribute "{manager_attr}" is not allowed, you must use "cn" or "uid"') - else: -- manager_dn = "cn={},cn=config".format(manager_cn) -+ manager_dn = "cn={},cn=config".format(manager_name) -+ manager_attr = "cn" - - if args.passwd: - repl_manager_password = args.passwd -@@ -544,10 +547,11 @@ def create_repl_manager(inst, basedn, log, args): - repl_manager_password = "" - repl_manager_password_confirm = "" - -- manager = BootstrapReplicationManager(inst, dn=manager_dn) -+ manager = BootstrapReplicationManager(inst, dn=manager_dn, rdn_attr=manager_attr) - try: - manager.create(properties={ -- 'cn': manager_cn, -+ 'cn': manager_name, -+ 'uid': manager_name, - 'userPassword': repl_manager_password - }) - if args.suffix: -@@ -564,7 +568,8 @@ def create_repl_manager(inst, basedn, log, args): - # Already there, but could have different password. 
Delete and recreate - manager.delete() - manager.create(properties={ -- 'cn': manager_cn, -+ 'cn': manager_name, -+ 'uid': manager_name, - 'userPassword': repl_manager_password - }) - if args.suffix: -@@ -954,6 +959,7 @@ def get_winsync_agmt_status(inst, basedn, log, args): - status = agmt.status(winsync=True, use_json=args.json) - log.info(status) - -+ - # - # Tasks - # -@@ -1347,8 +1353,7 @@ def create_parser(subparsers): - agmt_set_parser.add_argument('--wait-async-results', help="The amount of time in milliseconds the server waits if " - "the consumer is not ready before resending data") - agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after " -- "a consumer sends back a busy response before making another " -- "attempt to acquire access.") -+ "a consumer sends back a busy response before making another attempt to acquire access.") - agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.") - agmt_set_parser.add_argument('--flow-control-window', help="Sets the maximum number of entries and updates sent by a supplier, which are not acknowledged by the consumer.") - agmt_set_parser.add_argument('--flow-control-pause', help="The time in milliseconds to pause after reaching the number of entries and updates set in \"--flow-control-window\"") -@@ -1438,8 +1443,7 @@ def create_parser(subparsers): - winsync_agmt_add_parser.add_argument('--subtree-pair', help="Set the subtree pair: :") - winsync_agmt_add_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections") - winsync_agmt_add_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after " -- "a consumer sends back a busy response before making another " -- "attempt to acquire access.") -+ "a consumer sends back a busy response before making another attempt to acquire access.") - winsync_agmt_add_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.") - winsync_agmt_add_parser.add_argument('--init', action='store_true', default=False, help="Initialize the agreement after creating it.") - -@@ -1468,8 +1472,7 @@ def create_parser(subparsers): - winsync_agmt_set_parser.add_argument('--subtree-pair', help="Set the subtree pair: :") - winsync_agmt_set_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections") - winsync_agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after " -- "a consumer sends back a busy response before making another " -- "attempt to acquire access.") -+ "a consumer sends back a busy response before making another attempt to acquire access.") - winsync_agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.") - - # Get -diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py -index e3fc7fe1f..f8adb3ce2 100644 ---- a/src/lib389/lib389/replica.py -+++ b/src/lib389/lib389/replica.py -@@ -1779,15 +1779,18 @@ class BootstrapReplicationManager(DSLdapObject): - :type instance: lib389.DirSrv - :param dn: The dn to create - :type dn: str -+ :param rdn_attr: The attribute to use for the RDN -+ :type rdn_attr: str - """ -- def __init__(self, instance, dn='cn=replication manager,cn=config'): -+ def __init__(self, instance, dn='cn=replication manager,cn=config', 
rdn_attr='cn'): - super(BootstrapReplicationManager, self).__init__(instance, dn) -- self._rdn_attribute = 'cn' -+ self._rdn_attribute = rdn_attr - self._must_attributes = ['cn', 'userPassword'] - self._create_objectclasses = [ - 'top', -- 'netscapeServer', -- 'nsAccount' -+ 'inetUser', # for uid -+ 'netscapeServer', # for cn -+ 'nsAccount', # for authentication attributes - ] - if ds_is_older('1.4.0'): - self._create_objectclasses.remove('nsAccount') --- -2.26.2 - diff --git a/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch b/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch new file mode 100644 index 0000000..17de2c9 --- /dev/null +++ b/SOURCES/0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch @@ -0,0 +1,127 @@ +From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Tue, 24 Nov 2020 19:22:49 +0100 +Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve + database RUV - consumer (Unavailable) (#4451) + +Bug Description: + +"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this +appears into the Cockpit web UI too. +The problem is that the bind credentials are not rightly propagated when trying to get +the consumers agreement status. Then supplier credntials are used instead and RUV +is searched anonymously because there is no bind dn in ldapi case. + +Fix Description: + +- Propagates the bind credentials when computing agreement status +- Add a credential cache because now a replica password could get asked several times: + when discovering the topology and + when getting the agreement maxcsn +- No testcase in 1.4.3 branch as the file modfied in master does not exists + +- Add a comment about nonlocal keyword + +Relates: #4449 + +Reviewers: + firstyear + droideck + mreynolds + +Issue 4449: Add a comment about nonlocal keyword + +(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab) +--- + src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++-- + src/lib389/lib389/replica.py | 16 ++++++++++++---- + 2 files changed, 23 insertions(+), 6 deletions(-) + +diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py +index 9dbaa320a..248972cba 100644 +--- a/src/lib389/lib389/cli_conf/replication.py ++++ b/src/lib389/lib389/cli_conf/replication.py +@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args): + + def get_repl_monitor_info(inst, basedn, log, args): + connection_data = dsrc_to_repl_monitor(DSRC_HOME, log) ++ credentials_cache = {} + + # Additional details for the connections to the topology + def get_credentials(host, port): ++ # credentials_cache is nonlocal to refer to the instance ++ # from enclosing function (get_repl_monitor_info)` ++ nonlocal credentials_cache ++ key = f'{host}:{port}' ++ if key in credentials_cache: ++ return credentials_cache[key] + found = False + if args.connections: + connections = args.connections +@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args): + binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip() + bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip() + +- return {"binddn": binddn, +- "bindpw": bindpw} ++ credentials = {"binddn": binddn, ++ "bindpw": bindpw} ++ credentials_cache[key] = credentials ++ return credentials + + repl_monitor = ReplicationMonitor(inst) + report_dict = repl_monitor.generate_report(get_credentials, 
args.json) +diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py +index c2ad2104d..3d89e61fb 100644 +--- a/src/lib389/lib389/replica.py ++++ b/src/lib389/lib389/replica.py +@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object): + else: + self._log = logging.getLogger(__name__) + +- def _get_replica_status(self, instance, report_data, use_json): ++ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None): + """Load all of the status data to report + and add new hostname:port pairs for future processing ++ :type get_credentials: function + """ + + replicas_status = [] +@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object): + for agmt in agmts.list(): + host = agmt.get_attr_val_utf8_l("nsds5replicahost") + port = agmt.get_attr_val_utf8_l("nsds5replicaport") ++ if get_credentials is not None: ++ credentials = get_credentials(host, port) ++ binddn = credentials["binddn"] ++ bindpw = credentials["bindpw"] ++ else: ++ binddn = instance.binddn ++ bindpw = instance.bindpw + protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo') + # Supply protocol here because we need it only for connection + # and agreement status is already preformatted for the user output +@@ -2510,9 +2518,9 @@ class ReplicationMonitor(object): + if consumer not in report_data: + report_data[f"{consumer}:{protocol}"] = None + if use_json: +- agmts_status.append(json.loads(agmt.status(use_json=True))) ++ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw))) + else: +- agmts_status.append(agmt.status()) ++ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw)) + replicas_status.append({"replica_id": replica_id, + "replica_root": replica_root, + "replica_status": "Available", +@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object): + initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}" + # Do this on an initial instance to get the agreements to other instances + try: +- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json) ++ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials) + except ldap.LDAPError as e: + self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}") + report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}] +-- +2.26.2 + diff --git a/SOURCES/0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch b/SOURCES/0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch deleted file mode 100644 index 966627a..0000000 --- a/SOURCES/0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch +++ /dev/null @@ -1,34 +0,0 @@ -From ec85e986ec5710682de883f0f40f539b2f9945fa Mon Sep 17 00:00:00 2001 -From: Viktor Ashirov -Date: Wed, 27 May 2020 15:22:18 +0200 -Subject: [PATCH 10/12] Issue 50931 - RFE AD filter rewriter for ObjectCategory - -Bug Description: -ASAN build fails on RHEL due to linking issues - -Fix Description: -Add missing libslapd.la for librewriters.la - -Relates: https://pagure.io/389-ds-base/issue/50931 - -Reviewed by: tbordaz (Thanks!) 
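
The 0010-Issue-4449 change above threads a get_credentials(host, port) callback from get_repl_monitor_info down to ReplicationMonitor._get_replica_status and memoizes its answers, so a replica bind password is requested at most once per host:port even though both topology discovery and the agreement maxcsn lookup need the same credentials. Below is a minimal standalone sketch of that caching-callback pattern; the helper name make_credential_provider and the surrounding structure are illustrative only and are not part of the lib389 API.

from getpass import getpass


def make_credential_provider():
    """Return a get_credentials(host, port) callback that prompts only once per host:port."""
    credentials_cache = {}

    def get_credentials(host, port):
        # credentials_cache lives in the enclosing scope; since the dict is only
        # mutated (never rebound), 'nonlocal' is not strictly required here,
        # although the real patch declares it to make the intent explicit.
        key = f'{host}:{port}'
        if key not in credentials_cache:
            binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip()
            bindpw = getpass(f'Enter a password for {binddn} on {host}:{port}: ').rstrip()
            credentials_cache[key] = {"binddn": binddn, "bindpw": bindpw}
        return credentials_cache[key]

    return get_credentials


# The provider is then handed to code that may need the same replica several
# times (topology discovery, then the agreement status/maxcsn query), e.g.:
#   get_credentials = make_credential_provider()
#   report = repl_monitor.generate_report(get_credentials, as_json)
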
---- - Makefile.am | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/Makefile.am b/Makefile.am -index 2309f3010..0e5f04f91 100644 ---- a/Makefile.am -+++ b/Makefile.am -@@ -1159,7 +1159,7 @@ librewriters_la_SOURCES = \ - - librewriters_la_LDFLAGS = $(AM_LDFLAGS) - librewriters_la_CPPFLAGS = $(AM_CPPFLAGS) $(REWRITERS_INCLUDES) $(DSPLUGIN_CPPFLAGS) --librewriters_la_LIBADD = $(NSS_LINK) $(NSPR_LINK) -+librewriters_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) - - #------------------------ - # libsvrcore --- -2.26.2 - diff --git a/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch b/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch new file mode 100644 index 0000000..70974ce --- /dev/null +++ b/SOURCES/0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch @@ -0,0 +1,63 @@ +From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 30 Nov 2020 09:03:33 +0100 +Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong + cookie (#4467) + +Bug description: + This test case was incorrect. + During a refreshPersistent search, a cookie is sent + with the intermediate message that indicates the end of the refresh phase. + Then a second cookie is sent on the updated entry (group10) + I believed this test was successful some time ago but neither python-ldap + nor sync_repl changed (intermediate sent in post refresh). + So the testcase was never successful :( + +Fix description: + The fix is just to take into account the two expected cookies + +relates: https://github.com/389ds/389-ds-base/issues/4243 + +Reviewed by: Mark Reynolds + +Platforms tested: F31 +--- + .../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py +index 79ec374bc..7b35537d5 100644 +--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py ++++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py +@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request): + sync_repl.start() + time.sleep(5) + +- # Add a test group just to check that sync_repl receives only one update ++ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie + group.append(groups.create(properties={'cn': 'group%d' % 10})) + + # create users, that automember/memberof will generate nested updates +@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request): + time.sleep(10) + cookies = sync_repl.get_result() + +- # checking that the cookie list contains only one entry +- assert len(cookies) == 1 +- prev = 0 ++ # checking that the cookie list contains only two entries ++ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh ++ # the the one from SyncStateControl related to the only updated entry (group10) ++ assert len(cookies) == 2 ++ prev = -1 + for cookie in cookies: + log.info('Check cookie %s' % cookie) + +- assert int(cookie) > 0 ++ assert int(cookie) >= 0 + assert int(cookie) < 1000 + assert int(cookie) > prev + prev = int(cookie) +-- +2.26.2 + diff --git a/SOURCES/0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch b/SOURCES/0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch deleted file mode 100644 index c63a63c..0000000 --- a/SOURCES/0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch +++ /dev/null @@ -1,1218 
+0,0 @@ -From 21ed5224d63e3118a39ddd5ea438367532541a8f Mon Sep 17 00:00:00 2001 -From: Matus Honek -Date: Mon, 2 Dec 2019 14:53:31 +0100 -Subject: [PATCH 11/12] Issue 50746 - Add option to healthcheck to list all the - lint reports - -Bug Description: -Healthcheck lacks a way to find out what checks are available. - -Fix Description: -Add dsctl healthcheck options to list available checks, known error -codes, and ability to run cehcks selectively. The checks are rather -hierarchically structured and in some cases matchable by patterns (by -use of asterisk). - -Fixes https://pagure.io/389-ds-base/issue/50746 - -Author: Matus Honek - -Review by: Mark, William, Simon (thanks for the patience!) - -(cherry picked from commit 4a55322c7bdb0b9ff57428ad0dc2e4d943572a69) ---- - src/lib389/cli/dsctl | 1 + - src/lib389/lib389/_mapped_object.py | 34 +--- - src/lib389/lib389/_mapped_object_lint.py | 157 ++++++++++++++++++ - src/lib389/lib389/backend.py | 13 +- - src/lib389/lib389/cli_ctl/health.py | 116 +++++++++---- - src/lib389/lib389/config.py | 13 +- - src/lib389/lib389/dseldif.py | 29 +--- - src/lib389/lib389/encrypted_attributes.py | 1 - - src/lib389/lib389/index.py | 3 - - src/lib389/lib389/lint.py | 125 ++++++++------ - src/lib389/lib389/monitor.py | 5 +- - src/lib389/lib389/nss_ssl.py | 23 ++- - src/lib389/lib389/plugins.py | 5 +- - src/lib389/lib389/replica.py | 10 +- - .../lib389/tests/mapped_object_lint_test.py | 78 +++++++++ - 15 files changed, 448 insertions(+), 165 deletions(-) - create mode 100644 src/lib389/lib389/_mapped_object_lint.py - create mode 100644 src/lib389/lib389/tests/mapped_object_lint_test.py - -diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl -index fd9bd87c1..9deda7039 100755 ---- a/src/lib389/cli/dsctl -+++ b/src/lib389/cli/dsctl -@@ -64,6 +64,7 @@ cli_dbgen.create_parser(subparsers) - - argcomplete.autocomplete(parser) - -+ - # handle a control-c gracefully - def signal_handler(signal, frame): - print('\n\nExiting...') -diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py -index ce0ebfeb8..c60837601 100644 ---- a/src/lib389/lib389/_mapped_object.py -+++ b/src/lib389/lib389/_mapped_object.py -@@ -15,6 +15,7 @@ import json - from functools import partial - from lib389._entry import Entry - from lib389._constants import DIRSRV_STATE_ONLINE -+from lib389._mapped_object_lint import DSLint, DSLints - from lib389.utils import ( - ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str, - ensure_list_int, display_log_value, display_log_data -@@ -82,7 +83,7 @@ class DSLogging(object): - self._log.setLevel(logging.INFO) - - --class DSLdapObject(DSLogging): -+class DSLdapObject(DSLogging, DSLint): - """A single instance of DSLdapObjects - - :param instance: An instance -@@ -107,7 +108,6 @@ class DSLdapObject(DSLogging): - self._must_attributes = None - # attributes, we don't want to compare - self._compare_exclude = ['entryid', 'modifytimestamp', 'nsuniqueid'] -- self._lint_functions = None - self._server_controls = None - self._client_controls = None - self._object_filter = '(objectClass=*)' -@@ -985,38 +985,10 @@ class DSLdapObject(DSLogging): - """ - return self._create(rdn, properties, basedn, ensure=True) - -- def lint(self): -- """Override this to create a linter for a type. This means that we can detect -- and report common administrative errors in the server from our cli and -- rest tools. -- -- The structure of a result is:: -- -- { -- dsle: ''. dsle == ds lint error. 
Will be a code unique to -- this module for the error, IE DSBLE0001. -- severity: '[HIGH:MEDIUM:LOW]'. severity of the error. -- items: '(dn,dn,dn)'. List of affected DNs or names. -- detail: 'msg ...'. An explination of the error. -- fix: 'msg ...'. Steps to resolve the error. -- } -- -- :returns: An array of these dicts, on None if there are no errors. -- """ -- -- if not self._lint_functions: -- return None -- results = [] -- for fn in self._lint_functions: -- for result in fn(): -- if result is not None: -- results.append(result) -- return results -- - - # A challenge of this, is how do we manage indexes? They have two naming attributes.... - --class DSLdapObjects(DSLogging): -+class DSLdapObjects(DSLogging, DSLints): - """The object represents the next idea: "Everything is an instance of something - that exists in this way", i.e. we unite LDAP entries by some - set of parameters with the object. -diff --git a/src/lib389/lib389/_mapped_object_lint.py b/src/lib389/lib389/_mapped_object_lint.py -new file mode 100644 -index 000000000..2d03de98f ---- /dev/null -+++ b/src/lib389/lib389/_mapped_object_lint.py -@@ -0,0 +1,157 @@ -+from abc import ABC, abstractmethod -+from functools import partial -+from inspect import signature -+from typing import ( -+ Callable, -+ List, -+ Optional, -+ Tuple, -+ Union, -+ Type, -+ Generator, -+ Any -+) -+ -+ -+DSLintSpec = Tuple[str, Callable] -+DSLintParsedSpec = Tuple[Optional[str], Optional[str]] -+DSLintClassSpec = Generator[DSLintSpec, None, None] -+DSLintMethodSpec = Union[str, None, Type[List]] -+DSLintResults = Generator[Any, None, None] -+ -+ -+class DSLint(): -+ """In a super-class, create a method with name beginning with `_lint_` -+ which would yield results (as described below). Such a method will -+ then be available to the `lint()` method of the class. -+ -+ `lint_list`: takes a spec and yields available lints, recursively -+ `lint`: takes a spac and runs lints according to it, yielding results if any -+ -+ `spec`: is a colon-separated string, with prefix matching a method name and suffix -+ being passed down to the method. -+ -+ A class inheriting from hereby class shall implement a method named `lint_uid()` which -+ returns a pretty name of the object. This is to be used by a higher level code. -+ -+ Each lint method has to have a name prefix with _lint_. It may accept an optional -+ parameter `spec` in which case: -+ - it has to accept typing.List class as a parameter, in which case it shall yield -+ all possible lint specs for that method -+ - it receives the suffix provided to the `spec` of hereby `lint` method (as mentioned above) -+ -+ This means that we can detect and report common administrative errors -+ in the server from our cli and rest tools. -+ -+ The structure of a result shall be: -+ -+ { -+ dsle: ''. dsle == ds lint error. Will be a code unique to -+ this module for the error, IE DSBLE0001. -+ severity: '[HIGH:MEDIUM:LOW]'. severity of the error. -+ items: '(dn,dn,dn)'. List of affected DNs or names. -+ detail: 'msg ...'. An explination of the error. -+ fix: 'msg ...'. Steps to resolve the error. 
-+ } -+ """ -+ -+ @classmethod -+ def _dslint_fname(cls, method: Callable) -> Optional[str]: -+ """Return a pretty name for a method.""" -+ if callable(method) and method.__name__.startswith('_lint_'): -+ return method.__name__[len('_lint_'):] -+ else: -+ return None -+ -+ @staticmethod -+ def _dslint_parse_spec(spec: Optional[str]) -> DSLintParsedSpec: -+ """Split `spec` to prefix and suffix.""" -+ wanted, *rest = spec.split(':', 1) if spec else (None, None) -+ return (wanted if wanted not in [None, '*'] else None, -+ rest[0] if rest else None) -+ -+ @classmethod -+ def _dslint_make_spec(cls, method: Callable, spec: Optional[str] = None) -> str: -+ """Build a new spec from prefix (`method` name) and suffix (`spec`).""" -+ fname = cls._dslint_fname(method) -+ return f'{fname}:{spec}' if spec else fname -+ -+ def lint_list(self, spec: Optional[str] = None) -> DSLintClassSpec: -+ """Yield specs the object provides. -+ -+ This yields from each lint method yielding all specs it can provide. -+ """ -+ -+ assert hasattr(self, 'lint_uid') -+ -+ # Find _lint_ methods -+ # NOTE: There is a caveat: don't you dare try to getattr on a @property, or -+ # you get it executed. That's why the following line's complexity. -+ fs = [getattr(self, f) for f in dir(self) -+ if f.startswith('_lint_') and self._dslint_fname(getattr(self, f))] -+ -+ # Filter acording to the `spec` -+ wanted, rest = self._dslint_parse_spec(spec) -+ if wanted: -+ try: -+ fs = [next(filter(lambda f: self._dslint_fname(f) == wanted, fs))] -+ except StopIteration: -+ raise ValueError('there is no such lint function') -+ -+ # Yield known specs -+ for f in fs: -+ fspec_t = signature(f).parameters.get('spec', None) -+ if fspec_t: -+ assert fspec_t.annotation == DSLintMethodSpec -+ for fspec in [rest] if rest else f(spec=List): -+ yield self._dslint_make_spec(f, fspec), partial(f, spec=fspec) -+ else: -+ yield self._dslint_make_spec(f, rest), f -+ -+ def lint(self, spec: DSLintMethodSpec = None) -> DSLintResults: -+ """Lint the object according to the `spec`.""" -+ -+ if spec == List: -+ yield from self.lint_list() -+ else: -+ for fn, f in self.lint_list(spec): -+ yield from f() -+ -+ -+class DSLints(): -+ """This is a meta class to provide lint functionality to classes that provide -+ method `list` which returns list of objects that inherit from DSLint. -+ -+ Calling `lint` or `lint_list` method yields from respective object's methods. -+ -+ The `spec` is a colon-separated string. Its prefix matches the respective object's -+ `lint_uid` (or all when asterisk); the suffix is passed down to the respective -+ object's method. 
-+ """ -+ -+ def lint_list(self, spec: Optional[str] = None) -> DSLintClassSpec: -+ """Yield specs the objects returned by `list` method provide.""" -+ -+ assert hasattr(self, 'list') -+ -+ # Filter acording to the `spec` -+ wanted, rest_spec = DSLint._dslint_parse_spec(spec) -+ if wanted in [None, '*']: -+ clss = self.list() -+ else: -+ clss = (cls for cls in self.list() if cls.lint_uid() == wanted) -+ -+ # Yield known specs -+ for cls in clss: -+ for fn, f in cls.lint_list(spec=rest_spec): -+ yield (f'{cls.lint_uid()}:{fn}', -+ partial(f, rest_spec) if rest_spec else f) -+ -+ def lint(self, spec: DSLintMethodSpec = None) -> DSLintResults: -+ """Lint the objects returned by `list` method according to the `spec`.""" -+ -+ if spec == List: -+ yield from self.lint_list() -+ else: -+ for obj in self.list(): -+ yield from obj.lint() -diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py -index 4f752f414..8863ad1a8 100644 ---- a/src/lib389/lib389/backend.py -+++ b/src/lib389/lib389/backend.py -@@ -393,6 +393,10 @@ class BackendLegacy(object): - replace = [(ldap.MOD_REPLACE, 'nsslapd-require-index', 'on')] - self.modify_s(dn, replace) - -+ @classmethod -+ def lint_uid(cls): -+ return 'backends' -+ - - class Backend(DSLdapObject): - """Backend DSLdapObject with: -@@ -413,10 +417,12 @@ class Backend(DSLdapObject): - self._must_attributes = ['nsslapd-suffix', 'cn'] - self._create_objectclasses = ['top', 'extensibleObject', BACKEND_OBJECTCLASS_VALUE] - self._protected = False -- self._lint_functions = [self._lint_mappingtree, self._lint_search, self._lint_virt_attrs] - # Check if a mapping tree for this suffix exists. - self._mts = MappingTrees(self._instance) - -+ def lint_uid(self): -+ return self.get_attr_val_utf8_l('cn').lower() -+ - def _lint_virt_attrs(self): - """Check if any virtual attribute are incorrectly indexed""" - indexes = self.get_indexes() -@@ -497,7 +503,6 @@ class Backend(DSLdapObject): - result = DSBLE0001 - result['items'] = [bename, ] - yield result -- return None - - def create_sample_entries(self, version): - """Creates sample entries under nsslapd-suffix value -@@ -848,6 +853,10 @@ class Backends(DSLdapObjects): - self._childobject = Backend - self._basedn = DN_LDBM - -+ @classmethod -+ def lint_uid(cls): -+ return 'backends' -+ - def import_ldif(self, be_name, ldifs, chunk_size=None, encrypted=False, gen_uniq_id=None, only_core=False, - include_suffixes=None, exclude_suffixes=None): - """Do an import of the suffix""" -diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py -index 3d15ad85e..6333a753a 100644 ---- a/src/lib389/lib389/cli_ctl/health.py -+++ b/src/lib389/lib389/cli_ctl/health.py -@@ -7,6 +7,9 @@ - # --- END COPYRIGHT BLOCK --- - - import json -+import re -+from lib389._mapped_object import DSLdapObjects -+from lib389._mapped_object_lint import DSLint - from lib389.cli_base import connect_instance, disconnect_instance - from lib389.cli_base.dsrc import dsrc_to_ldap, dsrc_arg_concat - from lib389.backend import Backends -@@ -15,17 +18,17 @@ from lib389.monitor import MonitorDiskSpace - from lib389.replica import Replica, Changelog5 - from lib389.nss_ssl import NssSsl - from lib389.dseldif import FSChecks, DSEldif -+from lib389 import lint - from lib389 import plugins - from lib389._constants import DSRC_HOME -+from functools import partial -+from typing import Iterable - --# These get all instances, then check them all. --CHECK_MANY_OBJECTS = [ -- Backends, --] - - # These get single instances and check them. 
- CHECK_OBJECTS = [ - Config, -+ Backends, - Encryption, - FSChecks, - plugins.ReferentialIntegrityPlugin, -@@ -52,44 +55,51 @@ def _format_check_output(log, result, idx): - log.info(result['fix']) - - --def health_check_run(inst, log, args): -- """Connect to the local server using LDAPI, and perform various health checks -- """ -+def _list_targets(inst): -+ for c in CHECK_OBJECTS: -+ o = c(inst) -+ yield o.lint_uid(), o -+ -+ -+def _list_errors(log): -+ for r in map(partial(getattr, lint), -+ filter(partial(re.match, r'^DS'), -+ dir(lint))): -+ log.info(f"{r['dsle']} :: {r['description']}") - -- # update the args for connect_instance() -- args.basedn = None -- args.binddn = None -- args.bindpw = None -- args.starttls = None -- args.pwdfile = None -- args.prompt = False -- dsrc_inst = dsrc_to_ldap(DSRC_HOME, args.instance, log.getChild('dsrc')) -- dsrc_inst = dsrc_arg_concat(args, dsrc_inst) -- try: -- inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args) -- except Exception as e: -- raise ValueError('Failed to connect to Directory Server instance: ' + str(e)) - -+def _list_checks(inst, specs: Iterable[str]): -+ o_uids = dict(_list_targets(inst)) -+ for s in specs: -+ wanted, rest = DSLint._dslint_parse_spec(s) -+ if wanted == '*': -+ raise ValueError('Unexpected spec selector asterisk') -+ -+ if wanted in o_uids: -+ for l in o_uids[wanted].lint_list(rest): -+ yield o_uids[wanted], l -+ else: -+ raise ValueError('No such object specifier') -+ -+ -+def _print_checks(inst, specs: Iterable[str]) -> None: -+ for o, s in _list_checks(inst, specs): -+ print(f'{o.lint_uid()}:{s[0]}') -+ -+ -+def _run(inst, log, args, checks): - if not args.json: - log.info("Beginning lint report, this could take a while ...") -+ - report = [] -- for lo in CHECK_MANY_OBJECTS: -+ for o, s in checks: - if not args.json: -- log.info("Checking %s ..." % lo.__name__) -- lo_inst = lo(inst) -- for clo in lo_inst.list(): -- result = clo.lint() -- if result is not None: -- report += result -- for lo in CHECK_OBJECTS: -- if not args.json: -- log.info("Checking %s ..." 
% lo.__name__) -- lo_inst = lo(inst) -- result = lo_inst.lint() -- if result is not None: -- report += result -+ log.info(f"Checking {o.lint_uid()}:{s[0]} ...") -+ report += o.lint(s[0]) or [] -+ - if not args.json: - log.info("Healthcheck complete.") -+ - count = len(report) - if count == 0: - if not args.json: -@@ -110,6 +120,37 @@ def health_check_run(inst, log, args): - else: - log.info(json.dumps(report, indent=4)) - -+ -+def health_check_run(inst, log, args): -+ """Connect to the local server using LDAPI, and perform various health checks -+ """ -+ -+ if args.list_errors: -+ _list_errors(log) -+ return -+ -+ # update the args for connect_instance() -+ args.basedn = None -+ args.binddn = None -+ args.bindpw = None -+ args.starttls = None -+ args.pwdfile = None -+ args.prompt = False -+ dsrc_inst = dsrc_to_ldap(DSRC_HOME, args.instance, log.getChild('dsrc')) -+ dsrc_inst = dsrc_arg_concat(args, dsrc_inst) -+ try: -+ inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args) -+ except Exception as e: -+ raise ValueError('Failed to connect to Directory Server instance: ' + str(e)) -+ -+ checks = args.check or dict(_list_targets(inst)).keys() -+ -+ if args.list_checks or args.dry_run: -+ _print_checks(inst, checks) -+ return -+ -+ _run(inst, log, args, _list_checks(inst, checks)) -+ - disconnect_instance(inst) - - -@@ -120,4 +161,9 @@ def create_parser(subparsers): - "remote Directory Server as this tool needs access to local resources, " - "otherwise the report may be inaccurate.") - run_healthcheck_parser.set_defaults(func=health_check_run) -- -+ run_healthcheck_parser.add_argument('--list-checks', action='store_true', help='List of known checks') -+ run_healthcheck_parser.add_argument('--list-errors', action='store_true', help='List of known error codes') -+ run_healthcheck_parser.add_argument('--dry-run', action='store_true', help='Do not execute the actual check, only list what would be done') -+ run_healthcheck_parser.add_argument('--check', nargs='+', default=None, -+ help='Areas to check. These can be obtained by --list-checks. Every element on the left of the colon (:)' -+ ' may be replaced by an asterisk if multiple options on the right are available.') -diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py -index a29d0244c..aa4c92beb 100644 ---- a/src/lib389/lib389/config.py -+++ b/src/lib389/lib389/config.py -@@ -54,7 +54,6 @@ class Config(DSLdapObject): - ] - self._compare_exclude = self._compare_exclude + config_compare_exclude - self._rdn_attribute = 'cn' -- self._lint_functions = [self._lint_hr_timestamp, self._lint_passwordscheme] - - @property - def dn(self): -@@ -197,6 +196,10 @@ class Config(DSLdapObject): - fields = 'nsslapd-security nsslapd-ssl-check-hostname'.split() - return self._instance.getEntry(DN_CONFIG, attrlist=fields) - -+ @classmethod -+ def lint_uid(cls): -+ return 'config' -+ - def _lint_hr_timestamp(self): - hr_timestamp = self.get_attr_val('nsslapd-logging-hr-timestamps-enabled') - if ensure_bytes('on') != hr_timestamp: -@@ -242,20 +245,22 @@ class Encryption(DSLdapObject): - self._rdn_attribute = 'cn' - self._must_attributes = ['cn'] - self._protected = True -- self._lint_functions = [self._lint_check_tls_version] - - def create(self, rdn=None, properties={'cn': 'encryption', 'nsSSLClientAuth': 'allowed'}): - if rdn is not None: - self._log.debug("dn on cn=encryption is not None. 
This is a mistake.") - super(Encryption, self).create(properties=properties) - -+ @classmethod -+ def lint_uid(cls): -+ return 'encryption' -+ - def _lint_check_tls_version(self): - tls_min = self.get_attr_val('sslVersionMin') - if tls_min is not None and tls_min < ensure_bytes('TLS1.1'): - report = copy.deepcopy(DSELE0001) - report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid) - yield report -- yield None - - @property - def ciphers(self): -@@ -487,7 +492,6 @@ class LDBMConfig(DSLdapObject): - self._dn = DN_CONFIG_LDBM - # config_compare_exclude = [] - self._rdn_attribute = 'cn' -- self._lint_functions = [] - self._protected = True - - -@@ -506,5 +510,4 @@ class BDB_LDBMConfig(DSLdapObject): - self._dn = DN_CONFIG_LDBM_BDB - self._config_compare_exclude = [] - self._rdn_attribute = 'cn' -- self._lint_functions = [] - self._protected = True -diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py -index 5378e6ee9..96c9af9d1 100644 ---- a/src/lib389/lib389/dseldif.py -+++ b/src/lib389/lib389/dseldif.py -@@ -16,6 +16,7 @@ from datetime import timedelta - from stat import ST_MODE - # from lib389.utils import print_nice_time - from lib389.paths import Paths -+from lib389._mapped_object_lint import DSLint - from lib389.lint import ( - DSPERMLE0001, - DSPERMLE0002, -@@ -25,7 +26,7 @@ from lib389.lint import ( - ) - - --class DSEldif(object): -+class DSEldif(DSLint): - """A class for working with dse.ldif file - - :param instance: An instance -@@ -58,15 +59,10 @@ class DSEldif(object): - processed_line = line - else: - processed_line = processed_line[:-1] + line[1:] -- self._lint_functions = [self._lint_nsstate] - -- def lint(self): -- results = [] -- for fn in self._lint_functions: -- for result in fn(): -- if result is not None: -- results.append(result) -- return results -+ @classmethod -+ def lint_uid(cls): -+ return 'dseldif' - - def _lint_nsstate(self): - suffixes = self.readNsState() -@@ -320,7 +316,7 @@ class DSEldif(object): - return states - - --class FSChecks(object): -+class FSChecks(DSLint): - """This is for the healthcheck feature, check commonly used system config files the - server uses. This is here for lack of a better place to add this class. 
- """ -@@ -344,17 +340,10 @@ class FSChecks(object): - 'report': DSPERMLE0002 - }, - ] -- self._lint_functions = [self._lint_file_perms] - -- def lint(self): -- """Run a lint/healthcheck for this class -- """ -- results = [] -- for fn in self._lint_functions: -- for result in fn(): -- if result is not None: -- results.append(result) -- return results -+ @classmethod -+ def lint_uid(cls): -+ return 'fschecks' - - def _lint_file_perms(self): - """Test file permissions are safe -diff --git a/src/lib389/lib389/encrypted_attributes.py b/src/lib389/lib389/encrypted_attributes.py -index 9afd2e66b..2fa26cef9 100644 ---- a/src/lib389/lib389/encrypted_attributes.py -+++ b/src/lib389/lib389/encrypted_attributes.py -@@ -27,7 +27,6 @@ class EncryptedAttr(DSLdapObject): - self._must_attributes = ['cn', 'nsEncryptionAlgorithm'] - self._create_objectclasses = ['top', 'nsAttributeEncryption'] - self._protected = False -- self._lint_functions = [] - - - class EncryptedAttrs(DSLdapObjects): -diff --git a/src/lib389/lib389/index.py b/src/lib389/lib389/index.py -index 6932883b7..a3d019d27 100644 ---- a/src/lib389/lib389/index.py -+++ b/src/lib389/lib389/index.py -@@ -41,7 +41,6 @@ class Index(DSLdapObject): - self._must_attributes = ['cn', 'nsSystemIndex', 'nsIndexType'] - self._create_objectclasses = ['top', 'nsIndex'] - self._protected = False -- self._lint_functions = [] - - - class Indexes(DSLdapObjects): -@@ -77,7 +76,6 @@ class VLVSearch(DSLdapObject): - self._must_attributes = ['cn', 'vlvbase', 'vlvscope', 'vlvfilter'] - self._create_objectclasses = ['top', 'vlvSearch'] - self._protected = False -- self._lint_functions = [] - self._be_name = None - - def get_sorts(self): -@@ -163,7 +161,6 @@ class VLVIndex(DSLdapObject): - self._must_attributes = ['cn', 'vlvsort'] - self._create_objectclasses = ['top', 'vlvIndex'] - self._protected = False -- self._lint_functions = [] - - - class VLVIndexes(DSLdapObjects): -diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py -index b5a305bc3..a103feec7 100644 ---- a/src/lib389/lib389/lint.py -+++ b/src/lib389/lib389/lint.py -@@ -14,8 +14,9 @@ - DSBLE0001 = { - 'dsle': 'DSBLE0001', - 'severity': 'MEDIUM', -- 'items' : [], -- 'detail' : """This backend may be missing the correct mapping tree references. Mapping Trees allow -+ 'description': 'Possibly incorrect mapping tree.', -+ 'items': [], -+ 'detail': """This backend may be missing the correct mapping tree references. Mapping Trees allow - the directory server to determine which backend an operation is routed to in the - abscence of other information. This is extremely important for correct functioning - of LDAP ADD for example. -@@ -32,7 +33,7 @@ objectClass: extensibleObject - objectClass: nsMappingTree - - """, -- 'fix' : """Either you need to create the mapping tree, or you need to repair the related -+ 'fix': """Either you need to create the mapping tree, or you need to repair the related - mapping tree. You will need to do this by hand by editing cn=config, or stopping - the instance and editing dse.ldif. - """ -@@ -41,25 +42,28 @@ the instance and editing dse.ldif. - DSBLE0002 = { - 'dsle': 'DSBLE0002', - 'severity': 'HIGH', -- 'items' : [], -- 'detail' : """Unable to query the backend. LDAP error (ERROR)""", -- 'fix' : """Check the server's error and access logs for more information.""" -+ 'description': 'Unable to query backend.', -+ 'items': [], -+ 'detail': """Unable to query the backend. 
LDAP error (ERROR)""", -+ 'fix': """Check the server's error and access logs for more information.""" - } - - DSBLE0003 = { - 'dsle': 'DSBLE0003', - 'severity': 'LOW', -- 'items' : [], -- 'detail' : """The backend database has not been initialized yet""", -- 'fix' : """You need to import an LDIF file, or create the suffix entry, in order to initialize the database.""" -+ 'description': 'Uninitialized backend database.', -+ 'items': [], -+ 'detail': """The backend database has not been initialized yet""", -+ 'fix': """You need to import an LDIF file, or create the suffix entry, in order to initialize the database.""" - } - - # Config checks - DSCLE0001 = { -- 'dsle' : 'DSCLE0001', -- 'severity' : 'LOW', -+ 'dsle': 'DSCLE0001', -+ 'severity': 'LOW', -+ 'description': 'Different log timestamp format.', - 'items': ['cn=config', ], -- 'detail' : """nsslapd-logging-hr-timestamps-enabled changes the log format in directory server from -+ 'detail': """nsslapd-logging-hr-timestamps-enabled changes the log format in directory server from - - [07/Jun/2017:17:15:58 +1000] - -@@ -70,7 +74,7 @@ to - This actually provides a performance improvement. Additionally, this setting will be - removed in a future release. - """, -- 'fix' : """Set nsslapd-logging-hr-timestamps-enabled to on. -+ 'fix': """Set nsslapd-logging-hr-timestamps-enabled to on. - You can use 'dsconf' to set this attribute. Here is an example: - - # dsconf slapd-YOUR_INSTANCE config replace nsslapd-logging-hr-timestamps-enabled=on""" -@@ -79,8 +83,9 @@ You can use 'dsconf' to set this attribute. Here is an example: - DSCLE0002 = { - 'dsle': 'DSCLE0002', - 'severity': 'HIGH', -- 'items' : ['cn=config', ], -- 'detail' : """Password storage schemes in Directory Server define how passwords are hashed via a -+ 'description': 'Weak passwordStorageScheme.', -+ 'items': ['cn=config', ], -+ 'detail': """Password storage schemes in Directory Server define how passwords are hashed via a - one-way mathematical function for storage. Knowing the hash it is difficult to gain - the input, but knowing the input you can easily compare the hash. - -@@ -112,14 +117,15 @@ You can also use 'dsconf' to replace these values. Here is an example: - DSELE0001 = { - 'dsle': 'DSELE0001', - 'severity': 'MEDIUM', -- 'items' : ['cn=encryption,cn=config', ], -+ 'description': 'Weak TLS protocol version.', -+ 'items': ['cn=encryption,cn=config', ], - 'detail': """This Directory Server may not be using strong TLS protocol versions. TLS1.0 is known to - have a number of issues with the protocol. Please see: - - https://tools.ietf.org/html/rfc7457 - - It is advised you set this value to the maximum possible.""", -- 'fix' : """There are two options for setting the TLS minimum version allowed. You, -+ 'fix': """There are two options for setting the TLS minimum version allowed. You, - can set "sslVersionMin" in "cn=encryption,cn=config" to a version greater than "TLS1.0" - You can also use 'dsconf' to set this value. Here is an example: - -@@ -137,7 +143,8 @@ minimum version, but doing this affects the entire system: - DSRILE0001 = { - 'dsle': 'DSRILE0001', - 'severity': 'LOW', -- 'items' : ['cn=referential integrity postoperation,cn=plugins,cn=config', ], -+ 'description': 'Referential integrity plugin may be slower.', -+ 'items': ['cn=referential integrity postoperation,cn=plugins,cn=config', ], - 'detail': """The referential integrity plugin has an asynchronous processing mode. - This is controlled by the update-delay flag. 
When this value is 0, referential - integrity plugin processes these changes inside of the operation that modified -@@ -151,7 +158,7 @@ delays to your server by batching changes rather than smaller updates during syn - - We advise that you set this value to 0, and enable referint on all masters as it provides a more predictable behaviour. - """, -- 'fix' : """Set referint-update-delay to 0. -+ 'fix': """Set referint-update-delay to 0. - - You can use 'dsconf' to set this value. Here is an example: - -@@ -164,12 +171,13 @@ You must restart the Directory Server for this change to take effect.""" - DSRILE0002 = { - 'dsle': 'DSRILE0002', - 'severity': 'HIGH', -- 'items' : ['cn=referential integrity postoperation,cn=plugins,cn=config'], -+ 'description': 'Referential integrity plugin configured with unindexed attribute.', -+ 'items': ['cn=referential integrity postoperation,cn=plugins,cn=config'], - 'detail': """The referential integrity plugin is configured to use an attribute (ATTR) - that does not have an "equality" index in backend (BACKEND). - Failure to have the proper indexing will lead to unindexed searches which - cause high CPU and can significantly slow the server down.""", -- 'fix' : """Check the attributes set in "referint-membership-attr" to make sure they have -+ 'fix': """Check the attributes set in "referint-membership-attr" to make sure they have - an index defined that has at least the equality "eq" index type. You will - need to reindex the database after adding the missing index type. Here is an - example using dsconf: -@@ -182,12 +190,13 @@ example using dsconf: - DSDSLE0001 = { - 'dsle': 'DSDSLE0001', - 'severity': 'HIGH', -- 'items' : ['Server', 'cn=config'], -+ 'description': 'Low disk space.', -+ 'items': ['Server', 'cn=config'], - 'detail': """The disk partition used by the server (PARTITION), either for the database, the - configuration files, or the logs is over 90% full. If the partition becomes - completely filled serious problems can occur with the database or the server's - stability.""", -- 'fix' : """Attempt to free up disk space. Also try removing old rotated logs, or disable any -+ 'fix': """Attempt to free up disk space. Also try removing old rotated logs, or disable any - verbose logging levels that might have been set. You might consider enabling - the "Disk Monitoring" feature in cn=config to help prevent a disorderly shutdown - of the server: -@@ -210,9 +219,10 @@ Please see the Administration guide for more information: - DSREPLLE0001 = { - 'dsle': 'DSREPLLE0001', - 'severity': 'HIGH', -- 'items' : ['Replication', 'Agreement'], -+ 'description': 'Replication agreement not set to be synchronized.', -+ 'items': ['Replication', 'Agreement'], - 'detail': """The replication agreement (AGMT) under "SUFFIX" is not in synchronization.""", -- 'fix' : """You may need to reinitialize this replication agreement. Please check the errors -+ 'fix': """You may need to reinitialize this replication agreement. Please check the errors - log for more information. If you do need to reinitialize the agreement you can do so - using dsconf. Here is an example: - -@@ -223,9 +233,10 @@ using dsconf. 
Here is an example: - DSREPLLE0002 = { - 'dsle': 'DSREPLLE0002', - 'severity': 'LOW', -- 'items' : ['Replication', 'Conflict Entries'], -+ 'description': 'Replication conflict entries found.', -+ 'items': ['Replication', 'Conflict Entries'], - 'detail': "There were COUNT conflict entries found under the replication suffix \"SUFFIX\".", -- 'fix' : """While conflict entries are expected to occur in an MMR environment, they -+ 'fix': """While conflict entries are expected to occur in an MMR environment, they - should be resolved. In regards to conflict entries there is always the original/counterpart - entry that has a normal DN, and then the conflict version of that entry. Technically both - entries are valid, you as the administrator, needs to decide which entry you want to keep. -@@ -253,38 +264,42 @@ can use the CLI tool "dsconf" to resolve the conflict. Here is an example: - DSREPLLE0003 = { - 'dsle': 'DSREPLLE0003', - 'severity': 'MEDIUM', -- 'items' : ['Replication', 'Agreement'], -+ 'description': 'Unsynchronized replication agreement.', -+ 'items': ['Replication', 'Agreement'], - 'detail': """The replication agreement (AGMT) under "SUFFIX" is not in synchronization. - Status message: MSG""", -- 'fix' : """Replication is not in synchronization but it may recover. Continue to -+ 'fix': """Replication is not in synchronization but it may recover. Continue to - monitor this agreement.""" - } - - DSREPLLE0004 = { - 'dsle': 'DSREPLLE0004', - 'severity': 'MEDIUM', -- 'items' : ['Replication', 'Agreement'], -+ 'description': 'Unable to get replication agreement status.', -+ 'items': ['Replication', 'Agreement'], - 'detail': """Failed to get the agreement status for agreement (AGMT) under "SUFFIX". Error (ERROR).""", -- 'fix' : """None""" -+ 'fix': """None""" - } - - DSREPLLE0005 = { - 'dsle': 'DSREPLLE0005', - 'severity': 'MEDIUM', -- 'items' : ['Replication', 'Agreement'], -+ 'description': 'Replication consumer not reachable.', -+ 'items': ['Replication', 'Agreement'], - 'detail': """The replication agreement (AGMT) under "SUFFIX" is not in synchronization, - because the consumer server is not reachable.""", -- 'fix' : """Check if the consumer is running, and also check the errors log for more information.""" -+ 'fix': """Check if the consumer is running, and also check the errors log for more information.""" - } - - # Replication changelog - DSCLLE0001 = { - 'dsle': 'DSCLLE0001', - 'severity': 'LOW', -- 'items' : ['Replication', 'Changelog'], -+ 'description': 'Changelog trimming not configured.', -+ 'items': ['Replication', 'Changelog'], - 'detail': """The replication changelog does have any kind of trimming configured. This will - lead to the changelog size growing indefinitely.""", -- 'fix' : """Configure changelog trimming, preferably by setting the maximum age of a changelog -+ 'fix': """Configure changelog trimming, preferably by setting the maximum age of a changelog - record. Here is an example: - - # dsconf slapd-YOUR_INSTANCE replication set-changelog --max-age 30d""" -@@ -294,27 +309,30 @@ record. 
Here is an example: - DSCERTLE0001 = { - 'dsle': 'DSCERTLE0001', - 'severity': 'MEDIUM', -- 'items' : ['Expiring Certificate'], -+ 'description': 'Certificate about to expire.', -+ 'items': ['Expiring Certificate'], - 'detail': """The certificate (CERT) will expire in less than 30 days""", -- 'fix' : """Renew the certificate before it expires to prevent disruptions with TLS connections.""" -+ 'fix': """Renew the certificate before it expires to prevent disruptions with TLS connections.""" - } - - DSCERTLE0002 = { - 'dsle': 'DSCERTLE0002', - 'severity': 'HIGH', -- 'items' : ['Expired Certificate'], -+ 'description': 'Certificate expired.', -+ 'items': ['Expired Certificate'], - 'detail': """The certificate (CERT) has expired""", -- 'fix' : """Renew or remove the certificate.""" -+ 'fix': """Renew or remove the certificate.""" - } - - # Virtual Attrs & COS. Note - ATTR and SUFFIX are replaced by the reporting function - DSVIRTLE0001 = { - 'dsle': 'DSVIRTLE0001', - 'severity': 'HIGH', -- 'items' : ['Virtual Attributes'], -+ 'description': 'Virtual attribute indexed.', -+ 'items': ['Virtual Attributes'], - 'detail': """You should not index virtual attributes, and as this will break searches that - use the attribute in a filter.""", -- 'fix' : """Remove the index for this attribute from the backend configuration. -+ 'fix': """Remove the index for this attribute from the backend configuration. - Here is an example using 'dsconf' to remove an index: - - # dsconf slapd-YOUR_INSTANCE backend index delete --attr ATTR SUFFIX""" -@@ -324,10 +342,11 @@ Here is an example using 'dsconf' to remove an index: - DSPERMLE0001 = { - 'dsle': 'DSPERMLE0001', - 'severity': 'MEDIUM', -- 'items' : ['File Permissions'], -+ 'description': 'Incorrect file permissions.', -+ 'items': ['File Permissions'], - 'detail': """The file "FILE" does not have the expected permissions (PERMS). This - can cause issues with replication and chaining.""", -- 'fix' : """Change the file permissions: -+ 'fix': """Change the file permissions: - - # chmod PERMS FILE""" - } -@@ -336,10 +355,11 @@ can cause issues with replication and chaining.""", - DSPERMLE0002 = { - 'dsle': 'DSPERMLE0002', - 'severity': 'HIGH', -- 'items' : ['File Permissions'], -+ 'description': 'Incorrect security database file permissions.', -+ 'items': ['File Permissions'], - 'detail': """The file "FILE" does not have the expected permissions (PERMS). The - security database pin/password files should only be readable by Directory Server user.""", -- 'fix' : """Change the file permissions: -+ 'fix': """Change the file permissions: - - # chmod PERMS FILE""" - } -@@ -348,11 +368,12 @@ security database pin/password files should only be readable by Directory Server - DSSKEWLE0001 = { - 'dsle': 'DSSKEWLE0001', - 'severity': 'Low', -- 'items' : ['Replication'], -+ 'description': 'Medium time skew.', -+ 'items': ['Replication'], - 'detail': """The time skew is over 6 hours. If this time skew continues to increase - to 24 hours then replication can potentially stop working. Please continue to - monitor the time skew offsets for increasing values.""", -- 'fix' : """Monitor the time skew and avoid making changes to the system time. -+ 'fix': """Monitor the time skew and avoid making changes to the system time. 
- Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems - and find the paragraph "Too much time skew".""" - } -@@ -360,13 +381,14 @@ and find the paragraph "Too much time skew".""" - DSSKEWLE0002 = { - 'dsle': 'DSSKEWLE0002', - 'severity': 'Medium', -- 'items' : ['Replication'], -+ 'description': 'Major time skew.', -+ 'items': ['Replication'], - 'detail': """The time skew is over 12 hours. If this time skew continues to increase - to 24 hours then replication can potentially stop working. Please continue to - monitor the time skew offsets for increasing values. Setting nsslapd-ignore-time-skew - to "on" on each replica will allow replication to continue, but if the time skew - continues to increase other more serious replication problems can occur.""", -- 'fix' : """Monitor the time skew and avoid making changes to the system time. -+ 'fix': """Monitor the time skew and avoid making changes to the system time. - If you get close to 24 hours of time skew replication may stop working. - In that case configure the server to ignore the time skew until the system - times can be fixed/synchronized: -@@ -380,12 +402,13 @@ and find the paragraph "Too much time skew".""" - DSSKEWLE0003 = { - 'dsle': 'DSSKEWLE0003', - 'severity': 'High', -- 'items' : ['Replication'], -+ 'description': 'Extensive time skew.', -+ 'items': ['Replication'], - 'detail': """The time skew is over 24 hours. Setting nsslapd-ignore-time-skew - to "on" on each replica will allow replication to continue, but if the - time skew continues to increase other serious replication problems can - occur.""", -- 'fix' : """Avoid making changes to the system time, and make sure the clocks -+ 'fix': """Avoid making changes to the system time, and make sure the clocks - on all the replicas are correct. 
If you haven't set the server's - "ignore time skew" setting then do the following on all the replicas - until the time issues have been resolved: -diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py -index 73750c3c2..4ac7d7174 100644 ---- a/src/lib389/lib389/monitor.py -+++ b/src/lib389/lib389/monitor.py -@@ -358,7 +358,10 @@ class MonitorDiskSpace(DSLdapObject): - def __init__(self, instance, dn=None): - super(MonitorDiskSpace, self).__init__(instance=instance, dn=dn) - self._dn = "cn=disk space,cn=monitor" -- self._lint_functions = [self._lint_disk_space] -+ -+ @classmethod -+ def lint_uid(cls): -+ return 'monitor-disk-space' - - def _lint_disk_space(self): - partitions = self.get_attr_vals_utf8_l("dsDisk") -diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py -index d14e7ce6f..e257424fd 100644 ---- a/src/lib389/lib389/nss_ssl.py -+++ b/src/lib389/lib389/nss_ssl.py -@@ -21,6 +21,7 @@ import subprocess - from datetime import datetime, timedelta - from subprocess import check_output, run, PIPE - from lib389.passwd import password_generate -+from lib389._mapped_object_lint import DSLint - from lib389.lint import DSCERTLE0001, DSCERTLE0002 - from lib389.utils import ensure_str, format_cmd_list - import uuid -@@ -42,7 +43,7 @@ VALID_MIN = 61 # Days - log = logging.getLogger(__name__) - - --class NssSsl(object): -+class NssSsl(DSLint): - def __init__(self, dirsrv=None, dbpassword=None, dbpath=None): - self.dirsrv = dirsrv - self._certdb = dbpath -@@ -56,18 +57,14 @@ class NssSsl(object): - else: - self.dbpassword = dbpassword - -- self.db_files = {"dbm_backend": ["%s/%s" % (self._certdb, f) for f in ("key3.db", "cert8.db", "secmod.db")], -- "sql_backend": ["%s/%s" % (self._certdb, f) for f in ("key4.db", "cert9.db", "pkcs11.txt")], -- "support": ["%s/%s" % (self._certdb, f) for f in ("noise.txt", PIN_TXT, PWD_TXT)]} -- self._lint_functions = [self._lint_certificate_expiration,] -- -- def lint(self): -- results = [] -- for fn in self._lint_functions: -- for result in fn(): -- if result is not None: -- results.append(result) -- return results -+ self.db_files = {group: [f"{self._certdb}/{f}" for f in files] -+ for group, files in {"dbm_backend": ("key3.db", "cert8.db", "secmod.db"), -+ "sql_backend": ("key4.db", "cert9.db", "pkcs11.txt"), -+ "support": ("noise.txt", PIN_TXT, PWD_TXT)}.items()} -+ -+ @classmethod -+ def lint_uid(cls): -+ return 'ssl' - - def _lint_certificate_expiration(self): - """Check all the certificates in the db if they will expire within 30 days -diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py -index f68a1d114..89e660287 100644 ---- a/src/lib389/lib389/plugins.py -+++ b/src/lib389/lib389/plugins.py -@@ -431,7 +431,6 @@ class ReferentialIntegrityPlugin(Plugin): - 'referint-logfile', - 'referint-membership-attr', - ]) -- self._lint_functions = [self._lint_update_delay, self._lint_attr_indexes] - - def create(self, rdn=None, properties=None, basedn=None): - """Create an instance of the plugin""" -@@ -443,6 +442,10 @@ class ReferentialIntegrityPlugin(Plugin): - properties['referint-logfile'] = referint_log - return super(ReferentialIntegrityPlugin, self).create(rdn, properties, basedn) - -+ @classmethod -+ def lint_uid(cls): -+ return 'refint' -+ - def _lint_update_delay(self): - if self.status(): - delay = self.get_attr_val_int("referint-update-delay") -diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py -index f8adb3ce2..f575e58d5 100644 ---- a/src/lib389/lib389/replica.py -+++ 
b/src/lib389/lib389/replica.py -@@ -1049,7 +1049,10 @@ class Changelog5(DSLdapObject): - 'extensibleobject', - ] - self._protected = False -- self._lint_functions = [self._lint_cl_trimming] -+ -+ @classmethod -+ def lint_uid(cls): -+ return 'changelog' - - def _lint_cl_trimming(self): - """Check that cl trimming is at least defined to prevent unbounded growth""" -@@ -1120,7 +1123,10 @@ class Replica(DSLdapObject): - self._create_objectclasses.append('extensibleobject') - self._protected = False - self._suffix = None -- self._lint_functions = [self._lint_agmts_status, self._lint_conflicts] -+ -+ @classmethod -+ def lint_uid(cls): -+ return 'replication' - - def _lint_agmts_status(self): - replicas = Replicas(self._instance).list() -diff --git a/src/lib389/lib389/tests/mapped_object_lint_test.py b/src/lib389/lib389/tests/mapped_object_lint_test.py -new file mode 100644 -index 000000000..a4ca0ea3c ---- /dev/null -+++ b/src/lib389/lib389/tests/mapped_object_lint_test.py -@@ -0,0 +1,78 @@ -+from typing import List -+ -+import pytest -+ -+from lib389._mapped_object_lint import ( -+ DSLint, -+ DSLints, -+ DSLintMethodSpec -+) -+ -+ -+def test_dslint(): -+ class DS(DSLint): -+ def lint_uid(self) -> str: -+ return self.param -+ -+ def __init__(self, param): -+ self.param = param -+ self.suffixes = ['suffixA', 'suffixB'] -+ -+ def _lint_nsstate(self, spec: DSLintMethodSpec = None): -+ if spec == List: -+ yield from self.suffixes -+ else: -+ to_lint = [spec] if spec else self._lint_nsstate(spec=List) -+ for tl in to_lint: -+ if tl == 'suffixA': -+ pass -+ elif tl == 'suffixB': -+ yield 'suffixB is bad' -+ else: -+ raise ValueError('There is no such suffix') -+ -+ def _lint_second(self): -+ yield from () -+ -+ def _lint_third(self): -+ yield from ['this is a fail'] -+ -+ class DSs(DSLints): -+ def list(self): -+ for i in [DS("ma"), DS("mb")]: -+ yield i -+ -+ # single -+ inst = DS("a") -+ inst_lints = {'nsstate:suffixA', 'nsstate:suffixB', 'second', 'third'} -+ -+ assert inst.param == "a" -+ -+ assert set(dict(inst.lint_list()).keys()) == inst_lints -+ -+ assert set(dict(inst.lint_list('nsstate')).keys()) \ -+ == {f'nsstate:suffix{s}' for s in "AB"} -+ -+ assert list(inst._lint_nsstate(spec=List)) == ['suffixA', 'suffixB'] -+ assert list(inst.lint()) == ['suffixB is bad', 'this is a fail'] -+ -+ assert list(inst.lint('nsstate')) == ['suffixB is bad'] -+ assert list(inst.lint('nsstate:suffixA')) == [] -+ assert list(inst.lint('nsstate:suffixB')) == ['suffixB is bad'] -+ with pytest.raises(ValueError): -+ list(inst.lint('nonexistent')) -+ -+ # multiple -+ insts = DSs() -+ -+ assert insts.lint_list -+ assert insts.lint -+ -+ assert set(dict(insts.lint_list()).keys()) \ -+ == {f'{m}:{s}' for m in ['ma', 'mb'] for s in inst_lints} -+ assert set(dict(insts.lint_list('*')).keys()) \ -+ == {f'{m}:{s}' for m in ['ma', 'mb'] for s in inst_lints} -+ assert set(dict(insts.lint_list('*:nsstate')).keys()) \ -+ == {f'{m}:nsstate:suffix{s}' for m in ['ma', 'mb'] for s in "AB"} -+ assert set(dict(insts.lint_list('mb:nsstate')).keys()) \ -+ == {f'mb:nsstate:suffix{s}' for s in "AB"} --- -2.26.2 - diff --git a/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch b/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch new file mode 100644 index 0000000..13a64c2 --- /dev/null +++ b/SOURCES/0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch @@ -0,0 +1,254 @@ +From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001 +From: Pierre Rogier +Date: Mon, 30 Nov 2020 
12:42:17 +0100 +Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449) + in 1.4.3 branch + +--- + .../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++ + 1 file changed, 234 insertions(+) + create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +new file mode 100644 +index 000000000..b03d170c8 +--- /dev/null ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -0,0 +1,234 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import time ++import subprocess ++import pytest ++ ++from lib389.cli_conf.replication import get_repl_monitor_info ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_m2 ++from lib389.cli_base import FakeArgs ++from lib389.cli_base.dsrc import dsrc_arg_concat ++from lib389.cli_base import connect_instance ++ ++pytestmark = pytest.mark.tier0 ++ ++LOG_FILE = '/tmp/monitor.log' ++logging.getLogger(__name__).setLevel(logging.DEBUG) ++log = logging.getLogger(__name__) ++ ++ ++@pytest.fixture(scope="function") ++def set_log_file(request): ++ fh = logging.FileHandler(LOG_FILE) ++ fh.setLevel(logging.DEBUG) ++ log.addHandler(fh) ++ ++ def fin(): ++ log.info('Delete files') ++ os.remove(LOG_FILE) ++ ++ config = os.path.expanduser(DSRC_HOME) ++ if os.path.exists(config): ++ os.remove(config) ++ ++ request.addfinalizer(fin) ++ ++ ++def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None): ++ with open(LOG_FILE, 'r+') as f: ++ file_content = f.read() ++ ++ for item in content_list: ++ log.info('Check that "{}" is present'.format(item)) ++ assert item in file_content ++ ++ if second_list is not None: ++ log.info('Check for "{}"'.format(second_list)) ++ for item in second_list: ++ assert item in file_content ++ ++ if single_value is not None: ++ log.info('Check for "{}"'.format(single_value)) ++ assert single_value in file_content ++ ++ if error_list is not None: ++ log.info('Check that "{}" is not present'.format(error_list)) ++ for item in error_list: ++ assert item not in file_content ++ ++ log.info('Reset log file') ++ f.truncate(0) ++ ++ ++@pytest.mark.ds50545 ++@pytest.mark.bz1739718 ++@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") ++def test_dsconf_replication_monitor(topology_m2, set_log_file): ++ """Test replication monitor that was ported from legacy tools ++ ++ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6 ++ :setup: 2 MM topology ++ :steps: ++ 1. Create DS instance ++ 2. Run replication monitor with connections option ++ 3. Run replication monitor with aliases option ++ 4. Run replication monitor with --json option ++ 5. Run replication monitor with .dsrc file created ++ 6. Run replication monitor with connections option as if using dsconf CLI ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. 
Success ++ """ ++ ++ m1 = topology_m2.ms["master1"] ++ m2 = topology_m2.ms["master2"] ++ ++ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', ++ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] ++ ++ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) ++ content_list = ['Replica Root: dc=example,dc=com', ++ 'Replica ID: 1', ++ 'Replica Status: Available', ++ 'Max CSN', ++ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')', ++ 'Replica Enabled: on', ++ 'Update In Progress: FALSE', ++ 'Last Update Start:', ++ 'Last Update End:', ++ 'Number Of Changes Sent:', ++ 'Number Of Changes Skipped: None', ++ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded', ++ 'Last Init Start:', ++ 'Last Init End:', ++ 'Last Init Status:', ++ 'Reap Active: 0', ++ 'Replication Status: In Synchronization', ++ 'Replication Lag Time:', ++ 'Supplier: ', ++ m2.host + ':' + str(m2.port), ++ 'Replica Root: dc=example,dc=com', ++ 'Replica ID: 2', ++ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')'] ++ ++ error_list = ['consumer (Unavailable)', ++ 'Failed to retrieve database RUV entry from consumer'] ++ ++ json_list = ['type', ++ 'list', ++ 'items', ++ 'name', ++ m1.host + ':' + str(m1.port), ++ 'data', ++ '"replica_id": "1"', ++ '"replica_root": "dc=example,dc=com"', ++ '"replica_status": "Available"', ++ 'maxcsn', ++ 'agmts_status', ++ 'agmt-name', ++ '002', ++ 'replica', ++ m2.host + ':' + str(m2.port), ++ 'replica-enabled', ++ 'update-in-progress', ++ 'last-update-start', ++ 'last-update-end', ++ 'number-changes-sent', ++ 'number-changes-skipped', ++ 'last-update-status', ++ 'Error (0) Replica acquired successfully: Incremental update succeeded', ++ 'last-init-start', ++ 'last-init-end', ++ 'last-init-status', ++ 'reap-active', ++ 'replication-status', ++ 'In Synchronization', ++ 'replication-lag-time', ++ '"replica_id": "2"', ++ '001', ++ m1.host + ':' + str(m1.port)] ++ ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + m2.host + ':' + str(m2.port) ++ ++ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, ++ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] ++ ++ aliases = ['M1=' + m1.host + ':' + str(m1.port), ++ 'M2=' + m2.host + ':' + str(m2.port)] ++ ++ args = FakeArgs() ++ args.connections = connections ++ args.aliases = None ++ args.json = False ++ ++ log.info('Run replication monitor with connections option') ++ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) ++ ++ log.info('Run replication monitor with aliases option') ++ args.aliases = aliases ++ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, alias_content) ++ ++ log.info('Run replication monitor with --json option') ++ args.aliases = None ++ args.json = True ++ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(json_list) ++ ++ with open(os.path.expanduser(DSRC_HOME), 'w+') as f: ++ f.write(dsrc_content) ++ ++ args.connections = None ++ args.aliases = None ++ args.json = False ++ ++ log.info('Run replication monitor when .dsrc file is present with content') ++ 
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, alias_content) ++ os.remove(os.path.expanduser(DSRC_HOME)) ++ ++ log.info('Run replication monitor with connections option as if using dsconf CLI') ++ # Perform same test than steps 2 test but without using directly the topology instance. ++ # but with an instance similar to those than dsconf cli generates: ++ # step 2 args ++ args.connections = connections ++ args.aliases = None ++ args.json = False ++ # args needed to generate an instance with dsrc_arg_concat ++ args.instance = 'master1' ++ args.basedn = None ++ args.binddn = None ++ args.bindpw = None ++ args.pwdfile = None ++ args.prompt = False ++ args.starttls = False ++ dsrc_inst = dsrc_arg_concat(args, None) ++ inst = connect_instance(dsrc_inst, True, args) ++ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args) ++ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +-- +2.26.2 + diff --git a/SOURCES/0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch b/SOURCES/0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch deleted file mode 100644 index 3e61905..0000000 --- a/SOURCES/0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 2540354b7eb6fa03db7d36a5b755001b0852aa1b Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Thu, 26 Mar 2020 19:33:47 +0100 -Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring - -Description: Memory leaks are reported by the disk monitoring test suite. -The direct leak is related to char **dirs array which is not freed at all. -Free the array when we clean up or go to shutdown. -Fix disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown. -It should accept different exception when the instance is not started. - -https://pagure.io/389-ds-base/issue/50984 - -Reviewed by: firstyear (Thanks!) 
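The cleanup idiom described here, free the char **dirs array with slapi_ch_array_free() and reset the pointer so it cannot be reused (the related dropped patch further below, "Reset dirs pointer every time we free it", covers the reset half), can be sketched with plain libc. This is an illustration only: free_string_array() and the sample paths are invented, while slapi_ch_array_free() is the real helper the hunks call.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Free a NULL-terminated string array and reset the caller's pointer,
     * mirroring the "slapi_ch_array_free(dirs); dirs = NULL;" idiom from
     * the dropped disk-monitoring patches. */
    void
    free_string_array(char ***arr_p)
    {
        if (arr_p == NULL || *arr_p == NULL) {
            return;
        }
        for (char **cur = *arr_p; *cur != NULL; cur++) {
            free(*cur);
        }
        free(*arr_p);
        *arr_p = NULL; /* defensive reset so later code cannot touch freed memory */
    }

    int
    main(void)
    {
        char **dirs = calloc(3, sizeof(char *));
        if (dirs != NULL) {
            dirs[0] = strdup("/var/lib/dirsrv/slapd-example/db");
            dirs[1] = strdup("/var/log/dirsrv/slapd-example");
        }
        free_string_array(&dirs);
        printf("dirs after cleanup: %p\n", (void *)dirs);
        return 0;
    }

Compiled stand-alone (for example with cc -std=c99), the program prints a null pointer after cleanup, which is exactly the state the pointer reset is meant to guarantee.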
---- - ldap/servers/slapd/daemon.c | 2 -- - ldap/servers/slapd/main.c | 1 - - 2 files changed, 3 deletions(-) - -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index a70f40316..542d31037 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -613,7 +613,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - } - } - slapi_ch_array_free(dirs); -- dirs = NULL; - return; - } - /* -@@ -713,7 +712,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - } - } - slapi_ch_array_free(dirs); -- dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */ - g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL); - return; - } -diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c -index e54b8e1c5..1f8b01959 100644 ---- a/ldap/servers/slapd/main.c -+++ b/ldap/servers/slapd/main.c -@@ -958,7 +958,6 @@ main(int argc, char **argv) - goto cleanup; - } - slapi_ch_array_free(dirs); -- dirs = NULL; - } - /* log the max fd limit as it is typically set in env/systemd */ - slapi_log_err(SLAPI_LOG_INFO, "main", --- -2.26.2 - diff --git a/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch b/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch new file mode 100644 index 0000000..74aa5aa --- /dev/null +++ b/SOURCES/0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch @@ -0,0 +1,100 @@ +From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Thu, 26 Nov 2020 09:08:13 +1000 +Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy + +Bug Description: Due to some changes in dsrc for tlsreqcert +and how def open was structured in lib389, the system ldap.conf +policy was ignored. + +Fix Description: Default to using the system ldap.conf policy +if undefined in lib389 or the tls_reqcert param in dsrc. + +fixes: #4460 + +Author: William Brown + +Review by: ??? +--- + src/lib389/lib389/__init__.py | 11 +++++++---- + src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++------- + 2 files changed, 16 insertions(+), 11 deletions(-) + +diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py +index 99ea9cc6a..4e6a1905a 100644 +--- a/src/lib389/lib389/__init__.py ++++ b/src/lib389/lib389/__init__.py +@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object): + # Now, we are still an allocated ds object so we can be re-installed + self.state = DIRSRV_STATE_ALLOCATED + +- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD, ++ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None, + usercert=None, userkey=None): + ''' + It opens a ldap bound connection to dirsrv so that online +@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object): + try: + # Note this sets LDAP.OPT not SELF. 
Because once self has opened + # it can NOT change opts on reused (ie restart) +- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) +- self.log.debug("Using certificate policy %s", reqcert) +- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert) ++ if reqcert is not None: ++ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert) ++ self.log.debug("Using lib389 certificate policy %s", reqcert) ++ else: ++ self.log.debug("Using /etc/openldap/ldap.conf certificate policy") ++ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)) + except ldap.LDAPError as e: + self.log.fatal('TLS negotiation failed: %s', e) + raise e +diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py +index fec18a5f9..9b09ea568 100644 +--- a/src/lib389/lib389/cli_base/dsrc.py ++++ b/src/lib389/lib389/cli_base/dsrc.py +@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst): + 'tls_cacertdir': None, + 'tls_cert': None, + 'tls_key': None, +- 'tls_reqcert': ldap.OPT_X_TLS_HARD, ++ 'tls_reqcert': None, + 'starttls': args.starttls, + 'prompt': False, + 'pwdfile': None, +@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log): + dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None) + dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None) + if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']: +- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) ++ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name)) + + dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None) + # At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause +@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log): + + dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None) + dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None) +- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard') +- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']: +- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, +- path)) ++ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None) + if dsrc_inst['tls_reqcert'] == 'never': + dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER + elif dsrc_inst['tls_reqcert'] == 'allow': + dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW +- else: ++ elif dsrc_inst['tls_reqcert'] == 'hard': + dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD ++ elif dsrc_inst['tls_reqcert'] is None: ++ # Use system value ++ pass ++ else: ++ raise ValueError("dsrc tls_reqcert value invalid. 
%s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path)) + dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False) + dsrc_inst['pwdfile'] = None + dsrc_inst['prompt'] = False +-- +2.26.2 + diff --git a/SOURCES/0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch b/SOURCES/0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch deleted file mode 100644 index d554989..0000000 --- a/SOURCES/0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch +++ /dev/null @@ -1,52 +0,0 @@ -From a720e002751815323a295e11e77c56d7ce38314e Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Fri, 27 Mar 2020 11:35:55 +0100 -Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring - -Description: Reset dirs pointer every time we free it. -The code may be changed in the future so we should make it -more robust. - -https://pagure.io/389-ds-base/issue/50984 - -Reviewed by: spichugi, tbordaz (one line commit rule) ---- - ldap/servers/slapd/daemon.c | 2 ++ - ldap/servers/slapd/main.c | 1 + - 2 files changed, 3 insertions(+) - -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index 542d31037..a70f40316 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -613,6 +613,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - } - } - slapi_ch_array_free(dirs); -+ dirs = NULL; - return; - } - /* -@@ -712,6 +713,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) - } - } - slapi_ch_array_free(dirs); -+ dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */ - g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL); - return; - } -diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c -index 1f8b01959..e54b8e1c5 100644 ---- a/ldap/servers/slapd/main.c -+++ b/ldap/servers/slapd/main.c -@@ -958,6 +958,7 @@ main(int argc, char **argv) - goto cleanup; - } - slapi_ch_array_free(dirs); -+ dirs = NULL; - } - /* log the max fd limit as it is typically set in env/systemd */ - slapi_log_err(SLAPI_LOG_INFO, "main", --- -2.26.2 - diff --git a/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch b/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch new file mode 100644 index 0000000..16637bb --- /dev/null +++ b/SOURCES/0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch @@ -0,0 +1,60 @@ +From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Thu, 12 Nov 2020 13:04:21 +1000 +Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes + sigsegv in chaining + +Bug Description: When a paged search through chaining backend is +received with a false criticality (such as SSSD), chaining backend +will sigsegv due to a null context. + +Fix Description: When a NULL ctx is recieved to be freed, this is +as paged results have finished being sent, so we check the NULL +ctx and move on. 
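Put differently, the chaining backend's paged-results release callback has to tolerate being handed a context that no longer exists, either because the paged search already completed or because the non-critical control never set one up. A compilable sketch of that guard follows, with an invented placeholder type; the authoritative change is the cb_search.c hunk just below.

    #include <stdlib.h>

    /* Placeholder standing in for the chaining backend's private
     * search context; the field names only echo the real ones. */
    struct paged_search_ctx {
        void *readahead;
        void *tobefreed;
    };

    void
    search_results_release_sketch(void **sr)
    {
        struct paged_search_ctx *ctx = (sr != NULL) ? *sr : NULL;

        if (ctx == NULL) {
            /* The paged search is already complete; there is nothing to
             * free, so return instead of dereferencing a NULL context. */
            return;
        }
        /* Normal cleanup of ctx->readahead / ctx->tobefreed would go here. */
        free(ctx);
        *sr = NULL;
    }

Checking both the double pointer and the dereferenced value keeps the callback safe regardless of which layer dropped the context first.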
+ +fixes: #4428 + +Author: William Brown + +Review by: @droideck, @mreynolds389 +--- + ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++ + ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++ + 2 files changed, 10 insertions(+) + +diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c +index 69d23a6b5..d47cbc8e4 100644 +--- a/ldap/servers/plugins/chainingdb/cb_search.c ++++ b/ldap/servers/plugins/chainingdb/cb_search.c +@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr) + + slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, + "chaining_back_search_results_release\n"); ++ if (ctx == NULL) { ++ /* The paged search is already complete, just return */ ++ /* Could we have a ctx state flag instead? */ ++ return; ++ } ++ + if (ctx->readahead != ctx->tobefreed) { + slapi_entry_free(ctx->readahead); + } +diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c +index dfd5dd92c..d52fd25a6 100644 +--- a/ldap/servers/plugins/chainingdb/cb_utils.c ++++ b/ldap/servers/plugins/chainingdb/cb_utils.c +@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c + return LDAP_SUCCESS; + } + ++#ifdef DEBUG ++static int debug_on = 1; ++#else + static int debug_on = 0; ++#endif + + int + cb_debug_on() +-- +2.26.2 + diff --git a/SOURCES/0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch b/SOURCES/0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch deleted file mode 100644 index 704cff6..0000000 --- a/SOURCES/0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch +++ /dev/null @@ -1,569 +0,0 @@ -From f60364cd9472edc61e7d327d13dca67eadf0c5b2 Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Tue, 28 Apr 2020 23:44:20 +0200 -Subject: [PATCH] Issue 50201 - nsIndexIDListScanLimit accepts any value - -Bug Description: Setting of nsIndexIDListScanLimit like -'limit=2 limit=3' are detected and logged in error logs. -But the invalid value is successfully applied in the config entry -and the operation itself is successful. -The impact is limited because the index will be used following -idlistscanlimit rather than invalid definition nsIndexIDListScanLimit. - -Fix Description: Print the errors to the user when he tries to add -or to modify index config entry with malformed values. -Change tests accordingly. - -https://pagure.io/389-ds-base/issue/50201 - -Reviewed by: mreynolds, tbordaz (Thanks!) ---- - .../suites/filter/filterscanlimit_test.py | 87 ++++++++----------- - ldap/servers/slapd/back-ldbm/instance.c | 4 +- - ldap/servers/slapd/back-ldbm/ldbm_attr.c | 33 ++++++- - .../slapd/back-ldbm/ldbm_index_config.c | 59 +++++++++---- - ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 2 +- - .../servers/slapd/back-ldbm/proto-back-ldbm.h | 2 +- - 6 files changed, 114 insertions(+), 73 deletions(-) - -diff --git a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py -index dd9c6ee4e..0198f6533 100644 ---- a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py -+++ b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py -@@ -11,6 +11,7 @@ This script will test different type of Filers. 
- """ - - import os -+import ldap - import pytest - - from lib389._constants import DEFAULT_SUFFIX, PW_DM -@@ -19,11 +20,10 @@ from lib389.idm.user import UserAccounts - from lib389.idm.organizationalunit import OrganizationalUnits - from lib389.index import Index - from lib389.idm.account import Accounts --from lib389.idm.group import UniqueGroups, Group -+from lib389.idm.group import UniqueGroups - - pytestmark = pytest.mark.tier1 - -- - GIVEN_NAME = 'cn=givenname,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' - CN_NAME = 'cn=sn,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' - UNIQMEMBER = 'cn=uniquemember,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' -@@ -39,7 +39,6 @@ LIST_OF_USER_ACCOUNTING = [ - "Judy Wallace", - "Marcus Ward", - "Judy McFarland", -- "Anuj Hall", - "Gern Triplett", - "Emanuel Johnson", - "Brad Walker", -@@ -57,7 +56,6 @@ LIST_OF_USER_ACCOUNTING = [ - "Randy Ulrich", - "Richard Francis", - "Morgan White", -- "Anuj Maddox", - "Jody Jensen", - "Mike Carter", - "Gern Tyler", -@@ -77,8 +75,6 @@ LIST_OF_USER_HUMAN = [ - "Robert Daugherty", - "Torrey Mason", - "Brad Talbot", -- "Anuj Jablonski", -- "Harry Miller", - "Jeffrey Campaigne", - "Stephen Triplett", - "John Falena", -@@ -107,8 +103,7 @@ LIST_OF_USER_HUMAN = [ - "Tobias Schmith", - "Jon Goldstein", - "Janet Lutz", -- "Karl Cope", --] -+ "Karl Cope"] - - LIST_OF_USER_TESTING = [ - "Andy Bergin", -@@ -122,8 +117,7 @@ LIST_OF_USER_TESTING = [ - "Alan White", - "Daniel Ward", - "Lee Stockton", -- "Matthew Vaughan" --] -+ "Matthew Vaughan"] - - LIST_OF_USER_DEVELOPMENT = [ - "Kelly Winters", -@@ -143,7 +137,6 @@ LIST_OF_USER_DEVELOPMENT = [ - "Timothy Kelly", - "Sue Mason", - "Chris Alexander", -- "Anuj Jensen", - "Martin Talbot", - "Scott Farmer", - "Allison Jensen", -@@ -152,9 +145,7 @@ LIST_OF_USER_DEVELOPMENT = [ - "Dan Langdon", - "Ashley Knutson", - "Jon Bourke", -- "Pete Hunt", -- --] -+ "Pete Hunt"] - - LIST_OF_USER_PAYROLL = [ - "Ashley Chassin", -@@ -164,12 +155,17 @@ LIST_OF_USER_PAYROLL = [ - "Patricia Shelton", - "Dietrich Swain", - "Allison Hunter", -- "Anne-Louise Barnes" -+ "Anne-Louise Barnes"] - --] -+LIST_OF_USER_PEOPLE = [ -+ 'Sam Carter', -+ 'Tom Morris', -+ 'Kevin Vaughan', -+ 'Rich Daugherty', -+ 'Harry Miller', -+ 'Sam Schmith'] - - --@pytest.mark.skip(reason="https://pagure.io/389-ds-base/issue/50201") - def test_invalid_configuration(topo): - """" - Error handling for invalid configuration -@@ -190,10 +186,7 @@ def test_invalid_configuration(topo): - 'limit=0 flags=AND flags=AND', - 'limit=0 type=eq values=foo values=foo', - 'limit=0 type=eq values=foo,foo', -- 'limit=0 type=sub', -- 'limit=0 type=eq values=notvalid', - 'limit', -- 'limit=0 type=eq values=notavaliddn', - 'limit=0 type=pres values=bogus', - 'limit=0 type=eq,sub values=bogus', - 'limit=', -@@ -203,7 +196,8 @@ def test_invalid_configuration(topo): - 'limit=-2', - 'type=eq', - 'limit=0 type=bogus']: -- Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i) -+ with pytest.raises(ldap.UNWILLING_TO_PERFORM): -+ Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i) - - - def test_idlistscanlimit(topo): -@@ -247,28 +241,24 @@ def test_idlistscanlimit(topo): - (LIST_OF_USER_HUMAN, users_human), - (LIST_OF_USER_TESTING, users_testing), - (LIST_OF_USER_DEVELOPMENT, users_development), -- (LIST_OF_USER_PAYROLL, users_payroll)]: -+ (LIST_OF_USER_PAYROLL, users_payroll), -+ (LIST_OF_USER_PEOPLE, users_people)]: - for demo1 in data[0]: -+ fn = demo1.split()[0] -+ sn = 
demo1.split()[1] -+ uid = ''.join([fn[:1], sn]).lower() - data[1].create(properties={ -- 'uid': demo1, -+ 'uid': uid, - 'cn': demo1, -- 'sn': demo1.split()[1], -+ 'sn': sn, - 'uidNumber': str(1000), - 'gidNumber': '2000', -- 'homeDirectory': '/home/' + demo1, -- 'givenname': demo1.split()[0], -- 'userpassword': PW_DM -+ 'homeDirectory': f'/home/{uid}', -+ 'givenname': fn, -+ 'userpassword': PW_DM, -+ 'mail': f'{uid}@test.com' - }) - -- users_people.create(properties={ -- 'uid': 'scarter', -- 'cn': 'Sam Carter', -- 'sn': 'Carter', -- 'uidNumber': str(1000), -- 'gidNumber': '2000', -- 'homeDirectory': '/home/' + 'scarter', -- 'mail': 'scarter@anuj.com', -- }) - try: - # Change log levels - errorlog_value = topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-level') -@@ -297,16 +287,12 @@ def test_idlistscanlimit(topo): - - Index(topo.standalone, UNIQMEMBER).\ - replace('nsIndexIDListScanLimit', -- 'limit=0 type=eq values=uid=kvaughan,ou=People,' -- 'dc=example,dc=com,uid=rdaugherty,ou=People,dc=example,dc=com') -+ 'limit=0 type=eq values=uid=kvaughan\2Cou=People\2Cdc=example\2Cdc=com,' -+ 'uid=rdaugherty\2Cou=People\2Cdc=example\2Cdc=com') - - Index(topo.standalone, OBJECTCLASS).\ - replace('nsIndexIDListScanLimit', 'limit=0 type=eq flags=AND values=inetOrgPerson') - -- Index(topo.standalone, MAIL).\ -- replace('nsIndexIDListScanLimit', -- 'cn=mail,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config') -- - # Search with filter - for i in ['(sn=Lutz)', - '(sn=*ter)', -@@ -321,22 +307,24 @@ def test_idlistscanlimit(topo): - '(&(sn=*)(cn=*))', - '(sn=Hunter)', - '(&(givenname=Richard)(objectclass=organizationalPerson))', -- '(givenname=Anuj)', -+ '(givenname=Morgan)', - '(&(givenname=*)(cn=*))', - '(givenname=*)']: - assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(f'{i}') - -- # Creating Group -- Group(topo.standalone, 'cn=Accounting Managers,ou=groups,dc=example,dc=com').\ -- add('uniquemember', -+ # Creating Groups and adding members -+ groups = UniqueGroups(topo.standalone, DEFAULT_SUFFIX) -+ accounting_managers = groups.ensure_state(properties={'cn': 'Accounting Managers'}) -+ hr_managers = groups.ensure_state(properties={'cn': 'HR Managers'}) -+ -+ accounting_managers.add('uniquemember', - ['uid=scarter, ou=People, dc=example,dc=com', - 'uid=tmorris, ou=People, dc=example,dc=com', - 'uid=kvaughan, ou=People, dc=example,dc=com', - 'uid=rdaugherty, ou=People, dc=example,dc=com', - 'uid=hmiller, ou=People, dc=example,dc=com']) - -- Group(topo.standalone, 'cn=HR Managers,ou=groups,dc=example,dc=com').\ -- add('uniquemember', -+ hr_managers.add('uniquemember', - ['uid=kvaughan, ou=People, dc=example,dc=com', - 'uid=cschmith, ou=People, dc=example,dc=com']) - -@@ -403,10 +391,9 @@ def test_idlistscanlimit(topo): - '(&(sn=*)(cn=*))', - '(sn=Hunter)', - '(&(givenname=Richard)(objectclass=organizationalPerson))', -- '(givenname=Anuj)', -+ '(givenname=Morgan)', - '(&(givenname=*)(cn=*))', - '(givenname=*)']: -- - assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(value) - - finally: -diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c -index 04c28ff39..07655a8ec 100644 ---- a/ldap/servers/slapd/back-ldbm/instance.c -+++ b/ldap/servers/slapd/back-ldbm/instance.c -@@ -231,7 +231,7 @@ ldbm_instance_create_default_indexes(backend *be) - - /* ldbm_instance_config_add_index_entry(inst, 2, argv); */ - e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 0, 0); -- attr_index_config(be, "ldbm index init", 0, e, 1, 0); 
-+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL); - slapi_entry_free(e); - - if (!entryrdn_get_noancestorid()) { -@@ -240,7 +240,7 @@ ldbm_instance_create_default_indexes(backend *be) - * but we still want to use the attr index file APIs. - */ - e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0); -- attr_index_config(be, "ldbm index init", 0, e, 1, 0); -+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL); - slapi_entry_free(e); - } - -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c -index b9e130d77..f0d418572 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c -@@ -633,6 +633,18 @@ attr_index_idlistsize_config(Slapi_Entry *e, struct attrinfo *ai, char *returnte - return rc; - } - -+/* -+ * Function that process index attributes and modifies attrinfo structure -+ * -+ * Called while adding default indexes, during db2index execution and -+ * when we add/modify/delete index config entry -+ * -+ * If char *err_buf is not NULL, it will additionally print all error messages to STDERR -+ * It is used when we add/modify/delete index config entry, so the user would have a better verbose -+ * -+ * returns -1, 1 on a failure -+ * 0 on success -+ */ - int - attr_index_config( - backend *be, -@@ -640,7 +652,8 @@ attr_index_config( - int lineno, - Slapi_Entry *e, - int init __attribute__((unused)), -- int indextype_none) -+ int indextype_none, -+ char *err_buf) - { - ldbm_instance *inst = (ldbm_instance *)be->be_instance_info; - int j = 0; -@@ -662,6 +675,7 @@ attr_index_config( - slapi_attr_first_value(attr, &sval); - attrValue = slapi_value_get_berval(sval); - } else { -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing indexing arguments\n"); - slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing indexing arguments\n"); - return -1; - } -@@ -705,6 +719,10 @@ attr_index_config( - } - a->ai_indexmask = INDEX_OFFLINE; /* note that the index isn't available */ - } else { -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: %s: line %d: unknown index type \"%s\" (ignored) in entry (%s), " -+ "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n", -+ fname, lineno, attrValue->bv_val, slapi_entry_get_dn(e)); - slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", - "%s: line %d: unknown index type \"%s\" (ignored) in entry (%s), " - "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n", -@@ -715,6 +733,7 @@ attr_index_config( - } - if (hasIndexType == 0) { - /* indexType missing, error out */ -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing index type\n"); - slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing index type\n"); - attrinfo_delete(&a); - return -1; -@@ -873,16 +892,26 @@ attr_index_config( - slapi_ch_free((void **)&official_rules); - } - } -- - if ((return_value = attr_index_idlistsize_config(e, a, myreturntext))) { -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: %s: Failed to parse idscanlimit info: %d:%s\n", -+ fname, return_value, myreturntext); - slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "%s: Failed to parse idscanlimit info: %d:%s\n", - fname, return_value, myreturntext); -+ if (err_buf != NULL) { -+ /* we are inside of a callback, we shouldn't allow malformed attributes in index entries */ -+ attrinfo_delete(&a); -+ return return_value; -+ } - } - - /* initialize the IDL code's private data */ - return_value = 
idl_init_private(be, a); - if (0 != return_value) { - /* fatal error, exit */ -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: %s: line %d:Fatal Error: Failed to initialize attribute structure\n", -+ fname, lineno); - slapi_log_err(SLAPI_LOG_CRIT, "attr_index_config", - "%s: line %d:Fatal Error: Failed to initialize attribute structure\n", - fname, lineno); -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -index 45f0034f0..720f93036 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -@@ -25,26 +25,34 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en - #define INDEXTYPE_NONE 1 - - static int --ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name) -+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf) - { - Slapi_Attr *attr; - const struct berval *attrValue; - Slapi_Value *sval; -+ char *edn = slapi_entry_get_dn(e); - - /* Get the name of the attribute to index which will be the value - * of the cn attribute. */ - if (slapi_entry_attr_find(e, "cn", &attr) != 0) { -- slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", "Malformed index entry %s\n", -- slapi_entry_get_dn(e)); -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: malformed index entry %s\n", -+ edn); -+ slapi_log_err(SLAPI_LOG_ERR, -+ "ldbm_index_parse_entry", "Malformed index entry %s\n", -+ edn); - return LDAP_OPERATIONS_ERROR; - } - - slapi_attr_first_value(attr, &sval); - attrValue = slapi_value_get_berval(sval); - if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) { -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: malformed index entry %s -- empty index name\n", -+ edn); - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_index_parse_entry", "Malformed index entry %s -- empty index name\n", -- slapi_entry_get_dn(e)); -+ edn); - return LDAP_OPERATIONS_ERROR; - } - -@@ -59,16 +67,19 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st - attrValue = slapi_value_get_berval(sval); - if (NULL == attrValue->bv_val || attrValue->bv_len == 0) { - /* missing the index type, error out */ -- slapi_log_err(SLAPI_LOG_ERR, -- "ldbm_index_parse_entry", "Malformed index entry %s -- empty nsIndexType\n", -- slapi_entry_get_dn(e)); -+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: malformed index entry %s -- empty nsIndexType\n", -+ edn); -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", -+ "Malformed index entry %s -- empty nsIndexType\n", -+ edn); - slapi_ch_free_string(index_name); - return LDAP_OPERATIONS_ERROR; - } - } - - /* ok the entry is good to process, pass it to attr_index_config */ -- if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0)) { -+ if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) { - slapi_ch_free_string(index_name); - return LDAP_OPERATIONS_ERROR; - } -@@ -92,7 +103,7 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)), - ldbm_instance *inst = (ldbm_instance *)arg; - - returntext[0] = '\0'; -- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL); -+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL); - if (*returncode == LDAP_SUCCESS) { - return SLAPI_DSE_CALLBACK_OK; - } else { -@@ -117,7 
+128,7 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused)) - char *index_name = NULL; - - returntext[0] = '\0'; -- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name); -+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext); - if (*returncode == LDAP_SUCCESS) { - struct attrinfo *ai = NULL; - /* if the index is a "system" index, we assume it's being added by -@@ -179,7 +190,7 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, - slapi_attr_first_value(attr, &sval); - attrValue = slapi_value_get_berval(sval); - -- attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE); -+ attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE, returntext); - - ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo); - if (NULL == ainfo) { -@@ -213,14 +224,19 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse - Slapi_Value *sval; - const struct berval *attrValue; - struct attrinfo *ainfo = NULL; -+ char *edn = slapi_entry_get_dn(e); -+ char *edn_after = slapi_entry_get_dn(entryAfter); - - returntext[0] = '\0'; - *returncode = LDAP_SUCCESS; - - if (slapi_entry_attr_find(entryAfter, "cn", &attr) != 0) { -+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: malformed index entry %s - missing cn attribute\n", -+ edn_after); - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute\n", -- slapi_entry_get_dn(entryAfter)); -+ edn_after); - *returncode = LDAP_OBJECT_CLASS_VIOLATION; - return SLAPI_DSE_CALLBACK_ERROR; - } -@@ -228,31 +244,40 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse - attrValue = slapi_value_get_berval(sval); - - if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) { -+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: malformed index entry %s - missing index name\n", -+ edn); - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_instance_index_config_modify_callback", "Malformed index entry %s, missing index name\n", -- slapi_entry_get_dn(e)); -+ edn); - *returncode = LDAP_UNWILLING_TO_PERFORM; - return SLAPI_DSE_CALLBACK_ERROR; - } - - ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo); - if (NULL == ainfo) { -+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: malformed index entry %s - missing cn attribute info\n", -+ edn); - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute info\n", -- slapi_entry_get_dn(e)); -+ edn); - *returncode = LDAP_UNWILLING_TO_PERFORM; - return SLAPI_DSE_CALLBACK_ERROR; - } - - if (slapi_entry_attr_find(entryAfter, "nsIndexType", &attr) != 0) { -+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, -+ "Error: malformed index entry %s - missing nsIndexType attribute\n", -+ edn_after); - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing nsIndexType attribute\n", -- slapi_entry_get_dn(entryAfter)); -+ edn_after); - *returncode = LDAP_OBJECT_CLASS_VIOLATION; - return SLAPI_DSE_CALLBACK_ERROR; - } - -- if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0)) { -+ if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0, returntext)) { - *returncode = LDAP_UNWILLING_TO_PERFORM; - return SLAPI_DSE_CALLBACK_ERROR; - } -@@ -364,7 +389,7 @@ 
ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e) - ainfo_get(inst->inst_be, index_name, &ai); - } - if (!ai) { -- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name); -+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL); - } - if (rc == LDAP_SUCCESS) { - /* Assume the caller knows if it is OK to go online immediately */ -diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -index 9d82c8228..f2ef5ecd4 100644 ---- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -@@ -291,7 +291,7 @@ db2index_add_indexed_attr(backend *be, char *attrString) - } - } - -- attr_index_config(be, "from db2index()", 0, e, 0, 0); -+ attr_index_config(be, "from db2index()", 0, e, 0, 0, NULL); - slapi_entry_free(e); - - return (0); -diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -index 9a86c752b..a07acee5e 100644 ---- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -@@ -24,7 +24,7 @@ void attrinfo_delete(struct attrinfo **pp); - void ainfo_get(backend *be, char *type, struct attrinfo **at); - void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask); - void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at); --int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none); -+int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none, char *err_buf); - int db2index_add_indexed_attr(backend *be, char *attrString); - int ldbm_compute_init(void); - void attrinfo_deletetree(ldbm_instance *inst); --- -2.26.2 - diff --git a/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch new file mode 100644 index 0000000..de8c8a8 --- /dev/null +++ b/SOURCES/0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch @@ -0,0 +1,50 @@ +From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 7 Dec 2020 00:41:27 +0100 +Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate + of setsocketopt (#4437) + +Bug description: + When a socket is set with NO_DELAY=0 (nagle), written pdu are buffered + until buffer is full or tcp_cork is set. This reduce network traffic when + the application writes partial pdu. + DS write complete pdu (results/entries/..) so it gives low benefit for DS. + In addition nagle being 'on' by default, DS sets/unset socket tcp_cork to send + immediately results/entries at each operation. This is an overhead of syscalls. 
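As an illustration of the trade-off the bug description above refers to (not part of the patch): with Nagle left on, every result requires corking and uncorking the socket so the complete PDU is flushed at once, while disabling Nagle once per connection removes that per-operation setsockopt() churn. A minimal Python sockets sketch of the two write paths; the port and payload are placeholders and the real server does this in C.

    import socket

    def send_pdu_with_cork(sock: socket.socket, pdu: bytes) -> None:
        # Nagle left on: cork before the write and uncork after it so the
        # complete PDU is flushed at once -- two setsockopt() calls per operation.
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)  # Linux-only option
        sock.sendall(pdu)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)

    def send_pdu_no_nagle(sock: socket.socket, pdu: bytes) -> None:
        # Nagle disabled once at connection setup: a complete PDU goes out
        # immediately, with no per-operation socket-option toggling.
        sock.sendall(pdu)

    if __name__ == "__main__":
        conn = socket.create_connection(("localhost", 3389))         # assumed test listener
        conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)   # done once
        send_pdu_no_nagle(conn, b"example-pdu-bytes")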
+ +Fix description: + Disable nagle by default + +relates: https://github.com/389ds/389-ds-base/issues/4315 + +Reviewed by: @mreynolds389, @Firstyear + +Platforms tested: F33 +--- + ldap/servers/slapd/libglobs.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 7d5374c90..f8cf162e6 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -1635,12 +1635,11 @@ FrontendConfig_init(void) + #endif /* USE_SYSCONF */ + + init_accesscontrol = cfg->accesscontrol = LDAP_ON; +-#if defined(LINUX) +- /* On Linux, by default, we use TCP_CORK so we must enable nagle */ +- init_nagle = cfg->nagle = LDAP_ON; +-#else ++ ++ /* nagle triggers set/unset TCP_CORK setsockopt per operation ++ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork ++ */ + init_nagle = cfg->nagle = LDAP_OFF; +-#endif + init_security = cfg->security = LDAP_OFF; + init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON; + cfg->tls_check_crl = TLS_CHECK_NONE; +-- +2.26.2 + diff --git a/SOURCES/0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch b/SOURCES/0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch deleted file mode 100644 index 8da8d8f..0000000 --- a/SOURCES/0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch +++ /dev/null @@ -1,213 +0,0 @@ -From 3b3faee01e645577ad77ff4f38429a9e0806231b Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Tue, 16 Jun 2020 20:35:05 +0200 -Subject: [PATCH] Issue 51157 - Reindex task may create abandoned index file - -Bug Description: Recreating an index for the same attribute but changing -the case of for example 1 letter, results in abandoned indexfile. - -Fix Decsription: Add a test case to a newly created 'indexes' test suite. -When we remove the index config from the backend, - remove the attribute -info from LDBM instance attributes. - -https://pagure.io/389-ds-base/issue/51157 - -Reviewed by: firstyear, mreynolds (Thanks!) ---- - dirsrvtests/tests/suites/indexes/__init__.py | 3 + - .../tests/suites/indexes/regression_test.py | 125 ++++++++++++++++++ - ldap/servers/slapd/back-ldbm/ldbm_attr.c | 7 + - .../slapd/back-ldbm/ldbm_index_config.c | 3 + - .../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 + - 5 files changed, 139 insertions(+) - create mode 100644 dirsrvtests/tests/suites/indexes/__init__.py - create mode 100644 dirsrvtests/tests/suites/indexes/regression_test.py - -diff --git a/dirsrvtests/tests/suites/indexes/__init__.py b/dirsrvtests/tests/suites/indexes/__init__.py -new file mode 100644 -index 000000000..04441667e ---- /dev/null -+++ b/dirsrvtests/tests/suites/indexes/__init__.py -@@ -0,0 +1,3 @@ -+""" -+ :Requirement: 389-ds-base: Indexes -+""" -diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py -new file mode 100644 -index 000000000..1a71f16e9 ---- /dev/null -+++ b/dirsrvtests/tests/suites/indexes/regression_test.py -@@ -0,0 +1,125 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2020 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+import time -+import os -+import pytest -+import ldap -+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX -+from lib389.index import Indexes -+from lib389.backend import Backends -+from lib389.idm.user import UserAccounts -+from lib389.topologies import topology_st as topo -+ -+pytestmark = pytest.mark.tier1 -+ -+ -+def test_reindex_task_creates_abandoned_index_file(topo): -+ """ -+ Recreating an index for the same attribute but changing -+ the case of for example 1 letter, results in abandoned indexfile -+ -+ :id: 07ae5274-481a-4fa8-8074-e0de50d89ac6 -+ :setup: Standalone instance -+ :steps: -+ 1. Create a user object with additional attributes: -+ objectClass: mozillaabpersonalpha -+ mozillaCustom1: xyz -+ 2. Add an index entry mozillacustom1 -+ 3. Reindex the backend -+ 4. Check the content of the index (after it has been flushed to disk) mozillacustom1.db -+ 5. Remove the index -+ 6. Notice the mozillacustom1.db is removed -+ 7. Recreate the index but now use the exact case as mentioned in the schema -+ 8. Reindex the backend -+ 9. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db -+ 10. Check that an ldapsearch does not return a result (mozillacustom1=xyz) -+ 11. Check that an ldapsearch returns the results (mozillaCustom1=xyz) -+ 12. Restart the instance -+ 13. Notice that an ldapsearch does not return a result(mozillacustom1=xyz) -+ 15. Check that an ldapsearch does not return a result (mozillacustom1=xyz) -+ 16. Check that an ldapsearch returns the results (mozillaCustom1=xyz) -+ 17. Reindex the backend -+ 18. Notice the second indexfile for this attribute -+ 19. Check the content of the index (after it has been flushed to disk) no mozillacustom1.db -+ 20. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db -+ :expectedresults: -+ 1. Should Success. -+ 2. Should Success. -+ 3. Should Success. -+ 4. Should Success. -+ 5. Should Success. -+ 6. Should Success. -+ 7. Should Success. -+ 8. Should Success. -+ 9. Should Success. -+ 10. Should Success. -+ 11. Should Success. -+ 12. Should Success. -+ 13. Should Success. -+ 14. Should Success. -+ 15. Should Success. -+ 16. Should Success. -+ 17. Should Success. -+ 18. Should Success. -+ 19. Should Success. -+ 20. Should Success. 
-+ """ -+ -+ inst = topo.standalone -+ attr_name = "mozillaCustom1" -+ attr_value = "xyz" -+ -+ users = UserAccounts(inst, DEFAULT_SUFFIX) -+ user = users.create_test_user() -+ user.add("objectClass", "mozillaabpersonalpha") -+ user.add(attr_name, attr_value) -+ -+ backends = Backends(inst) -+ backend = backends.get(DEFAULT_BENAME) -+ indexes = backend.get_indexes() -+ index = indexes.create(properties={ -+ 'cn': attr_name.lower(), -+ 'nsSystemIndex': 'false', -+ 'nsIndexType': ['eq', 'pres'] -+ }) -+ -+ backend.reindex() -+ time.sleep(3) -+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") -+ index.delete() -+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") -+ -+ index = indexes.create(properties={ -+ 'cn': attr_name, -+ 'nsSystemIndex': 'false', -+ 'nsIndexType': ['eq', 'pres'] -+ }) -+ -+ backend.reindex() -+ time.sleep(3) -+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") -+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db") -+ -+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}") -+ assert len(entries) > 0 -+ inst.restart() -+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}") -+ assert len(entries) > 0 -+ -+ backend.reindex() -+ time.sleep(3) -+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") -+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db") -+ -+ -+if __name__ == "__main__": -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c -index f0d418572..688c4f137 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c -@@ -98,6 +98,13 @@ ainfo_cmp( - return (strcasecmp(a->ai_type, b->ai_type)); - } - -+void -+attrinfo_delete_from_tree(backend *be, struct attrinfo *ai) -+{ -+ ldbm_instance *inst = (ldbm_instance *)be->be_instance_info; -+ avl_delete(&inst->inst_attrs, ai, ainfo_cmp); -+} -+ - /* - * Called when a duplicate "index" line is encountered. 
- * -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -index 720f93036..9722d0ce7 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c -@@ -201,7 +201,10 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, - *returncode = LDAP_UNWILLING_TO_PERFORM; - rc = SLAPI_DSE_CALLBACK_ERROR; - } -+ attrinfo_delete_from_tree(inst->inst_be, ainfo); - } -+ /* Free attrinfo structure */ -+ attrinfo_delete(&ainfo); - bail: - return rc; - } -diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -index a07acee5e..4d2524fd9 100644 ---- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -@@ -21,6 +21,7 @@ - */ - struct attrinfo *attrinfo_new(void); - void attrinfo_delete(struct attrinfo **pp); -+void attrinfo_delete_from_tree(backend *be, struct attrinfo *ai); - void ainfo_get(backend *be, char *type, struct attrinfo **at); - void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask); - void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at); --- -2.26.2 - diff --git a/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch b/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch new file mode 100644 index 0000000..a2cb4bd --- /dev/null +++ b/SOURCES/0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch @@ -0,0 +1,39 @@ +From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Fri, 4 Dec 2020 10:14:33 +1000 +Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in + SSCA (#4472) + +Bug Description: During SSCA creation, the server cert did not have +the machine name, which meant that the cert would not work without +reqcert = never. + +Fix Description: Add the machine name as an alt name during SSCA +creation. It is not guaranteed this value is correct, but it +is better than nothing. 
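A side note on why the missing machine name forces reqcert = never: strict TLS clients match the hostname against the certificate's subjectAltName extension, not the subject CN. The sketch below shows a CSR carrying a DNS SAN using recent versions of the python-cryptography package; it is only an illustration of the concept, not the NSS/lib389 code path the patch touches, and the hostname value is an assumption.

    from cryptography import x509
    from cryptography.x509.oid import NameOID
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    hostname = "ldap.example.com"  # stand-in for the instance's full machine name

    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    csr = (
        x509.CertificateSigningRequestBuilder()
        .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, hostname)]))
        # Hostname verification checks the SAN, so the machine name must appear here.
        .add_extension(x509.SubjectAlternativeName([x509.DNSName(hostname)]),
                       critical=False)
        .sign(key, hashes.SHA256())
    )
    print(csr.public_bytes(serialization.Encoding.PEM).decode())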
+ +relates: https://github.com/389ds/389-ds-base/issues/4460 + +Author: William Brown + +Review by: mreynolds389, droideck +--- + src/lib389/lib389/instance/setup.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index 7d42ba292..e46f2d1e5 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -887,7 +887,7 @@ class SetupDs(object): + tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir)) + tlsdb_inst.import_rsa_crt(ca) + +- csr = tlsdb.create_rsa_key_and_csr() ++ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']]) + (ca, crt) = ssca.rsa_ca_sign_csr(csr) + tlsdb.import_rsa_crt(ca, crt) + if general['selinux']: +-- +2.26.2 + diff --git a/SOURCES/0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch b/SOURCES/0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch deleted file mode 100644 index 10c002c..0000000 --- a/SOURCES/0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch +++ /dev/null @@ -1,668 +0,0 @@ -From 282edde7950ceb2515d74fdbcc0a188131769d74 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 23 Jun 2020 16:38:55 -0400 -Subject: [PATCH] Issue 51165 - add new access log keywords for wtime and - optime - -Description: In addition to the "etime" stat in the access we can also - add the time the operation spent in the work queue, and - how long the actual operation took. We now have "wtime" - and "optime" to track these stats in the access log. - - Also updated logconf for notes=F (related to a different - ticket), and stats for wtime and optime. - -relates: https://pagure.io/389-ds-base/issue/51165 - -Reviewed by: ? ---- - ldap/admin/src/logconv.pl | 187 +++++++++++++++++++++++++++--- - ldap/servers/slapd/add.c | 3 + - ldap/servers/slapd/bind.c | 4 + - ldap/servers/slapd/delete.c | 3 + - ldap/servers/slapd/modify.c | 3 + - ldap/servers/slapd/modrdn.c | 3 + - ldap/servers/slapd/operation.c | 24 ++++ - ldap/servers/slapd/opshared.c | 3 + - ldap/servers/slapd/result.c | 49 ++++---- - ldap/servers/slapd/slap.h | 13 ++- - ldap/servers/slapd/slapi-plugin.h | 26 ++++- - 11 files changed, 269 insertions(+), 49 deletions(-) - -diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl -index f4808a101..1ed44a888 100755 ---- a/ldap/admin/src/logconv.pl -+++ b/ldap/admin/src/logconv.pl -@@ -3,7 +3,7 @@ - # - # BEGIN COPYRIGHT BLOCK - # Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. --# Copyright (C) 2013 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). 
-@@ -55,7 +55,7 @@ my $reportStats = ""; - my $dataLocation = "/tmp"; - my $startTLSoid = "1.3.6.1.4.1.1466.20037"; - my @statnames=qw(last last_str results srch add mod modrdn moddn cmp del abandon -- conns sslconns bind anonbind unbind notesA notesU etime); -+ conns sslconns bind anonbind unbind notesA notesU notesF etime); - my $s_stats; - my $m_stats; - my $verb = "no"; -@@ -211,6 +211,7 @@ my $sslClientBindCount = 0; - my $sslClientFailedCount = 0; - my $objectclassTopCount= 0; - my $pagedSearchCount = 0; -+my $invalidFilterCount = 0; - my $bindCount = 0; - my $filterCount = 0; - my $baseCount = 0; -@@ -258,7 +259,7 @@ map {$conn{$_} = $_} @conncodes; - # hash db-backed hashes - my @hashnames = qw(attr rc src rsrc excount conn_hash ip_hash conncount nentries - filter base ds6xbadpwd saslmech saslconnop bindlist etime oid -- start_time_of_connection end_time_of_connection -+ start_time_of_connection end_time_of_connection notesf_conn_op - notesa_conn_op notesu_conn_op etime_conn_op nentries_conn_op - optype_conn_op time_conn_op srch_conn_op del_conn_op mod_conn_op - mdn_conn_op cmp_conn_op bind_conn_op unbind_conn_op ext_conn_op -@@ -926,7 +927,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){ - } - while($op > 0){ - # The bind op is not the same as the search op that triggered the notes=A. -- # We have adjust the key by decrementing the op count until we find the last bind op. -+ # We have to adjust the key by decrementing the op count until we find the last bind op. - $op--; - $binddn_key = "$srvRstCnt,$conn,$op"; - if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) { -@@ -1049,9 +1050,60 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){ - } - } - } --} # end of unindexed search report -+ print "\n"; -+} -+ -+print "Invalid Attribute Filters: $invalidFilterCount\n"; -+if ($invalidFilterCount > 0 && $verb eq "yes"){ -+ my $conn_hash = $hashes->{conn_hash}; -+ my $notesf_conn_op = $hashes->{notesf_conn_op}; -+ my $time_conn_op = $hashes->{time_conn_op}; -+ my $etime_conn_op = $hashes->{etime_conn_op}; -+ my $nentries_conn_op = $hashes->{nentries_conn_op}; -+ my $filter_conn_op = $hashes->{filter_conn_op}; -+ my $bind_conn_op = $hashes->{bind_conn_op}; -+ my $notesCount = 1; -+ my $unindexedIp; -+ my $binddn_key; -+ my %uniqFilt = (); # hash of unique filters -+ my %uniqFilter = (); # hash of unique filters bind dn -+ my %uniqBindDNs = (); # hash of unique bind dn's -+ my %uniqBindFilters = (); # hash of filters for a bind DN -+ -+ while (my ($srcnt_conn_op, $count) = each %{$notesf_conn_op}) { -+ my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op); -+ my $attrIp = getIPfromConn($conn, $srvRstCnt); -+ print "\n Invalid Attribute Filter #".$notesCount." (notes=F)\n"; -+ print " - Date/Time: $time_conn_op->{$srcnt_conn_op}\n"; -+ print " - Connection Number: $conn\n"; -+ print " - Operation Number: $op\n"; -+ print " - Etime: $etime_conn_op->{$srcnt_conn_op}\n"; -+ print " - Nentries: $nentries_conn_op->{$srcnt_conn_op}\n"; -+ print " - IP Address: $attrIp\n"; -+ if (exists($filter_conn_op->{$srcnt_conn_op}) && defined($filter_conn_op->{$srcnt_conn_op})) { -+ print " - Search Filter: $filter_conn_op->{$srcnt_conn_op}\n"; -+ $uniqFilt{$filter_conn_op->{$srcnt_conn_op}}++; -+ } -+ while($op > 0){ -+ # The bind op is not the same as the search op that triggered the notes=A. -+ # We have to adjust the key by decrementing the op count until we find the last bind op. 
-+ $op--; -+ $binddn_key = "$srvRstCnt,$conn,$op"; -+ if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) { -+ print " - Bind DN: $bind_conn_op->{$binddn_key}\n"; -+ $uniqBindDNs{$bind_conn_op->{$binddn_key}}++; -+ if( $uniqFilt{$filter_conn_op->{$srcnt_conn_op}} && defined($filter_conn_op->{$srcnt_conn_op})) { -+ $uniqBindFilters{$bind_conn_op->{$binddn_key}}{$filter_conn_op->{$srcnt_conn_op}}++; -+ $uniqFilter{$filter_conn_op->{$srcnt_conn_op}}{$bind_conn_op->{$binddn_key}}++; -+ } -+ last; -+ } -+ } -+ $notesCount++; -+ } -+ print "\n"; -+} - --print "\n"; - print "FDs Taken: $fdTaken\n"; - print "FDs Returned: $fdReturned\n"; - print "Highest FD Taken: $highestFdTaken\n\n"; -@@ -1386,20 +1438,20 @@ if ($usage =~ /l/ || $verb eq "yes"){ - } - } - --######################################### --# # --# Gather and Process the unique etimes # --# # --######################################### -+############################################################## -+# # -+# Gather and Process the unique etimes, wtimes, and optimes # -+# # -+############################################################## - - my $first; - if ($usage =~ /t/i || $verb eq "yes"){ -+ # Print the elapsed times (etime) -+ - my $etime = $hashes->{etime}; - my @ekeys = keys %{$etime}; -- # - # print most often etimes -- # -- print "\n\n----- Top $sizeCount Most Frequent etimes -----\n\n"; -+ print "\n\n----- Top $sizeCount Most Frequent etimes (elapsed times) -----\n\n"; - my $eloop = 0; - my $retime = 0; - foreach my $et (sort { $etime->{$b} <=> $etime->{$a} } @ekeys) { -@@ -1411,16 +1463,84 @@ if ($usage =~ /t/i || $verb eq "yes"){ - printf "%-8s %-12s\n", $etime->{ $et }, "etime=$et"; - $eloop++; - } -- # -+ if ($eloop == 0) { -+ print "None"; -+ } - # print longest etimes -- # -- print "\n\n----- Top $sizeCount Longest etimes -----\n\n"; -+ print "\n\n----- Top $sizeCount Longest etimes (elapsed times) -----\n\n"; - $eloop = 0; - foreach my $et (sort { $b <=> $a } @ekeys) { - if ($eloop == $sizeCount) { last; } - printf "%-12s %-10s\n","etime=$et",$etime->{ $et }; - $eloop++; - } -+ if ($eloop == 0) { -+ print "None"; -+ } -+ -+ # Print the wait times (wtime) -+ -+ my $wtime = $hashes->{wtime}; -+ my @wkeys = keys %{$wtime}; -+ # print most often wtimes -+ print "\n\n----- Top $sizeCount Most Frequent wtimes (wait times) -----\n\n"; -+ $eloop = 0; -+ $retime = 0; -+ foreach my $et (sort { $wtime->{$b} <=> $wtime->{$a} } @wkeys) { -+ if ($eloop == $sizeCount) { last; } -+ if ($retime ne "2"){ -+ $first = $et; -+ $retime = "2"; -+ } -+ printf "%-8s %-12s\n", $wtime->{ $et }, "wtime=$et"; -+ $eloop++; -+ } -+ if ($eloop == 0) { -+ print "None"; -+ } -+ # print longest wtimes -+ print "\n\n----- Top $sizeCount Longest wtimes (wait times) -----\n\n"; -+ $eloop = 0; -+ foreach my $et (sort { $b <=> $a } @wkeys) { -+ if ($eloop == $sizeCount) { last; } -+ printf "%-12s %-10s\n","wtime=$et",$wtime->{ $et }; -+ $eloop++; -+ } -+ if ($eloop == 0) { -+ print "None"; -+ } -+ -+ # Print the operation times (optime) -+ -+ my $optime = $hashes->{optime}; -+ my @opkeys = keys %{$optime}; -+ # print most often optimes -+ print "\n\n----- Top $sizeCount Most Frequent optimes (actual operation times) -----\n\n"; -+ $eloop = 0; -+ $retime = 0; -+ foreach my $et (sort { $optime->{$b} <=> $optime->{$a} } @opkeys) { -+ if ($eloop == $sizeCount) { last; } -+ if ($retime ne "2"){ -+ $first = $et; -+ $retime = "2"; -+ } -+ printf "%-8s %-12s\n", $optime->{ $et }, "optime=$et"; -+ $eloop++; -+ } -+ if ($eloop == 0) { -+ 
print "None"; -+ } -+ # print longest optimes -+ print "\n\n----- Top $sizeCount Longest optimes (actual operation times) -----\n\n"; -+ $eloop = 0; -+ foreach my $et (sort { $b <=> $a } @opkeys) { -+ if ($eloop == $sizeCount) { last; } -+ printf "%-12s %-10s\n","optime=$et",$optime->{ $et }; -+ $eloop++; -+ } -+ if ($eloop == 0) { -+ print "None"; -+ } - } - - ####################################### -@@ -2152,6 +2272,26 @@ sub parseLineNormal - if (m/ RESULT err=/ && m/ notes=[A-Z,]*P/){ - $pagedSearchCount++; - } -+ if (m/ RESULT err=/ && m/ notes=[A-Z,]*F/){ -+ $invalidFilterCount++; -+ $con = ""; -+ if ($_ =~ /conn= *([0-9A-Z]+)/i){ -+ $con = $1; -+ if ($_ =~ /op= *([0-9\-]+)/i){ $op = $1;} -+ } -+ -+ if($reportStats){ inc_stats('notesF',$s_stats,$m_stats); } -+ if ($usage =~ /u/ || $usage =~ /U/ || $verb eq "yes"){ -+ if($_ =~ /etime= *([0-9.]+)/i ){ -+ if($1 >= $minEtime){ -+ $hashes->{etime_conn_op}->{"$serverRestartCount,$con,$op"} = $1; -+ $hashes->{notesf_conn_op}->{"$serverRestartCount,$con,$op"}++; -+ if ($_ =~ / *([0-9a-z:\/]+)/i){ $hashes->{time_conn_op}->{"$serverRestartCount,$con,$op"} = $1; } -+ if ($_ =~ /nentries= *([0-9]+)/i ){ $hashes->{nentries_conn_op}->{"$serverRestartCount,$con,$op"} = $1; } -+ } -+ } -+ } -+ } - if (m/ notes=[A-Z,]*A/){ - $con = ""; - if ($_ =~ /conn= *([0-9A-Z]+)/i){ -@@ -2435,6 +2575,16 @@ sub parseLineNormal - if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{etime}->{$etime_val}++; } - if ($reportStats){ inc_stats_val('etime',$etime_val,$s_stats,$m_stats); } - } -+ if ($_ =~ /wtime= *([0-9.]+)/ ) { -+ my $wtime_val = $1; -+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{wtime}->{$wtime_val}++; } -+ if ($reportStats){ inc_stats_val('wtime',$wtime_val,$s_stats,$m_stats); } -+ } -+ if ($_ =~ /optime= *([0-9.]+)/ ) { -+ my $optime_val = $1; -+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{optime}->{$optime_val}++; } -+ if ($reportStats){ inc_stats_val('optime',$optime_val,$s_stats,$m_stats); } -+ } - if ($_ =~ / tag=101 / || $_ =~ / tag=111 / || $_ =~ / tag=100 / || $_ =~ / tag=115 /){ - if ($_ =~ / nentries= *([0-9]+)/i ){ - my $nents = $1; -@@ -2555,7 +2705,7 @@ sub parseLineNormal - } - } - } -- if (/ RESULT err=/ && / tag=97 nentries=0 etime=/ && $_ =~ /dn=\"(.*)\"/i){ -+ if (/ RESULT err=/ && / tag=97 nentries=0 / && $_ =~ /dn=\"(.*)\"/i){ - # Check if this is a sasl bind, if see we need to add the RESULT's dn as a bind dn - my $binddn = $1; - my ($conn, $op); -@@ -2680,6 +2830,7 @@ print_stats_block - $stats->{'unbind'}, - $stats->{'notesA'}, - $stats->{'notesU'}, -+ $stats->{'notesF'}, - $stats->{'etime'}), - "\n" ); - } else { -diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c -index 06ca1ee79..52c64fa3c 100644 ---- a/ldap/servers/slapd/add.c -+++ b/ldap/servers/slapd/add.c -@@ -441,6 +441,9 @@ op_shared_add(Slapi_PBlock *pb) - internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL); - pwpolicy = new_passwdPolicy(pb, slapi_entry_get_dn(e)); - -+ /* Set the time we actually started the operation */ -+ slapi_operation_set_time_started(operation); -+ - /* target spec is used to decide which plugins are applicable for the operation */ - operation_set_target_spec(operation, slapi_entry_get_sdn(e)); - -diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c -index 310216e89..55f865077 100644 ---- a/ldap/servers/slapd/bind.c -+++ b/ldap/servers/slapd/bind.c -@@ -87,6 +87,10 @@ do_bind(Slapi_PBlock *pb) - send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, NULL, 0, NULL); - goto free_and_return; - } -+ -+ /* 
Set the time we actually started the operation */ -+ slapi_operation_set_time_started(pb_op); -+ - ber = pb_op->o_ber; - - /* -diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c -index c0e61adf1..1a7209317 100644 ---- a/ldap/servers/slapd/delete.c -+++ b/ldap/servers/slapd/delete.c -@@ -236,6 +236,9 @@ op_shared_delete(Slapi_PBlock *pb) - slapi_pblock_get(pb, SLAPI_OPERATION, &operation); - internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL); - -+ /* Set the time we actually started the operation */ -+ slapi_operation_set_time_started(operation); -+ - sdn = slapi_sdn_new_dn_byval(rawdn); - dn = slapi_sdn_get_dn(sdn); - slapi_pblock_set(pb, SLAPI_DELETE_TARGET_SDN, (void *)sdn); -diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c -index 259bedfff..a186dbde3 100644 ---- a/ldap/servers/slapd/modify.c -+++ b/ldap/servers/slapd/modify.c -@@ -626,6 +626,9 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw) - slapi_pblock_get(pb, SLAPI_SKIP_MODIFIED_ATTRS, &skip_modified_attrs); - slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); - -+ /* Set the time we actually started the operation */ -+ slapi_operation_set_time_started(operation); -+ - if (sdn) { - passin_sdn = 1; - } else { -diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c -index 3efe584a7..e04916b83 100644 ---- a/ldap/servers/slapd/modrdn.c -+++ b/ldap/servers/slapd/modrdn.c -@@ -417,6 +417,9 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args) - internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL); - slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); - -+ /* Set the time we actually started the operation */ -+ slapi_operation_set_time_started(operation); -+ - /* - * If ownership has not been passed to this function, we replace the - * string input fields within the pblock with strdup'd copies. Why? 
-diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c -index ff16cd906..4dd3481c7 100644 ---- a/ldap/servers/slapd/operation.c -+++ b/ldap/servers/slapd/operation.c -@@ -651,3 +651,27 @@ slapi_operation_time_expiry(Slapi_Operation *o, time_t timeout, struct timespec - { - slapi_timespec_expire_rel(timeout, &(o->o_hr_time_rel), expiry); - } -+ -+/* Set the time the operation actually started */ -+void -+slapi_operation_set_time_started(Slapi_Operation *o) -+{ -+ clock_gettime(CLOCK_MONOTONIC, &(o->o_hr_time_started_rel)); -+} -+ -+/* The time diff of how long the operation took once it actually started */ -+void -+slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed) -+{ -+ struct timespec o_hr_time_now; -+ clock_gettime(CLOCK_MONOTONIC, &o_hr_time_now); -+ -+ slapi_timespec_diff(&o_hr_time_now, &(o->o_hr_time_started_rel), elapsed); -+} -+ -+/* The time diff the operation waited in the work queue */ -+void -+slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed) -+{ -+ slapi_timespec_diff(&(o->o_hr_time_started_rel), &(o->o_hr_time_rel), elapsed); -+} -diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c -index 9fe78655c..c0bc5dcd0 100644 ---- a/ldap/servers/slapd/opshared.c -+++ b/ldap/servers/slapd/opshared.c -@@ -284,6 +284,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result) - slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn); - slapi_pblock_get(pb, SLAPI_OPERATION, &operation); - -+ /* Set the time we actually started the operation */ -+ slapi_operation_set_time_started(operation); -+ - if (NULL == sdn) { - sdn = slapi_sdn_new_dn_byval(base); - slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, sdn); -diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c -index 0b13c30e9..61efb6f8d 100644 ---- a/ldap/servers/slapd/result.c -+++ b/ldap/servers/slapd/result.c -@@ -1975,6 +1975,8 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - CSN *operationcsn = NULL; - char csn_str[CSN_STRSIZE + 5]; - char etime[ETIME_BUFSIZ] = {0}; -+ char wtime[ETIME_BUFSIZ] = {0}; -+ char optime[ETIME_BUFSIZ] = {0}; - int pr_idx = -1; - int pr_cookie = -1; - uint32_t operation_notes; -@@ -1982,19 +1984,26 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - int32_t op_id; - int32_t op_internal_id; - int32_t op_nested_count; -+ struct timespec o_hr_time_end; - - get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); -- - slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_INDEX, &pr_idx); - slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_COOKIE, &pr_cookie); -- - internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL); - -- struct timespec o_hr_time_end; -+ /* total elapsed time */ - slapi_operation_time_elapsed(op, &o_hr_time_end); -+ snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec); -+ -+ /* wait time */ -+ slapi_operation_workq_time_elapsed(op, &o_hr_time_end); -+ snprintf(wtime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec); -+ -+ /* op time */ -+ slapi_operation_op_time_elapsed(op, &o_hr_time_end); -+ snprintf(optime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec); - - -- snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec); - - operation_notes = 
slapi_pblock_get_operation_notes(pb); - -@@ -2025,16 +2034,16 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - if (!internal_op) { - slapi_log_access(LDAP_DEBUG_STATS, - "conn=%" PRIu64 " op=%d RESULT err=%d" -- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s" -+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s" - ", SASL bind in progress\n", - op->o_connid, - op->o_opid, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str); - } else { - --#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s, SASL bind in progress\n" -+#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s, SASL bind in progress\n" - slapi_log_access(LDAP_DEBUG_ARGS, - connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_SASLMSG_FMT : - LOG_CONN_OP_FMT_EXT_INT LOG_SASLMSG_FMT, -@@ -2043,7 +2052,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - op_internal_id, - op_nested_count, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str); - } - } else if (op->o_tag == LDAP_REQ_BIND && err == LDAP_SUCCESS) { -@@ -2057,15 +2066,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - if (!internal_op) { - slapi_log_access(LDAP_DEBUG_STATS, - "conn=%" PRIu64 " op=%d RESULT err=%d" -- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s" -+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s" - " dn=\"%s\"\n", - op->o_connid, - op->o_opid, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str, dn ? dn : ""); - } else { --#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s dn=\"%s\"\n" -+#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s dn=\"%s\"\n" - slapi_log_access(LDAP_DEBUG_ARGS, - connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_BINDMSG_FMT : - LOG_CONN_OP_FMT_EXT_INT LOG_BINDMSG_FMT, -@@ -2074,7 +2083,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - op_internal_id, - op_nested_count, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str, dn ? dn : ""); - } - slapi_ch_free((void **)&dn); -@@ -2083,15 +2092,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - if (!internal_op) { - slapi_log_access(LDAP_DEBUG_STATS, - "conn=%" PRIu64 " op=%d RESULT err=%d" -- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s" -+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s" - " pr_idx=%d pr_cookie=%d\n", - op->o_connid, - op->o_opid, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str, pr_idx, pr_cookie); - } else { --#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s pr_idx=%d pr_cookie=%d \n" -+#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s pr_idx=%d pr_cookie=%d \n" - slapi_log_access(LDAP_DEBUG_ARGS, - connid == 0 ? 
LOG_CONN_OP_FMT_INT_INT LOG_PRMSG_FMT : - LOG_CONN_OP_FMT_EXT_INT LOG_PRMSG_FMT, -@@ -2100,7 +2109,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - op_internal_id, - op_nested_count, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str, pr_idx, pr_cookie); - } - } else if (!internal_op) { -@@ -2114,11 +2123,11 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - } - slapi_log_access(LDAP_DEBUG_STATS, - "conn=%" PRIu64 " op=%d RESULT err=%d" -- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s%s\n", -+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n", - op->o_connid, - op->o_opid, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str, ext_str); - if (pbtxt) { - /* if !pbtxt ==> ext_str == "". Don't free ext_str. */ -@@ -2126,7 +2135,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - } - } else { - int optype; --#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s\n" -+#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n" - slapi_log_access(LDAP_DEBUG_ARGS, - connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT : - LOG_CONN_OP_FMT_EXT_INT LOG_MSG_FMT, -@@ -2135,7 +2144,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries - op_internal_id, - op_nested_count, - err, tag, nentries, -- etime, -+ wtime, optime, etime, - notes_str, csn_str); - /* - * If this is an unindexed search we should log it in the error log if -diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h -index cef8c789c..8e76393c3 100644 ---- a/ldap/servers/slapd/slap.h -+++ b/ldap/servers/slapd/slap.h -@@ -1538,16 +1538,17 @@ typedef struct slapi_operation_results - */ - typedef struct op - { -- BerElement *o_ber; /* ber of the request */ -- ber_int_t o_msgid; /* msgid of the request */ -- ber_tag_t o_tag; /* tag of the request */ -+ BerElement *o_ber; /* ber of the request */ -+ ber_int_t o_msgid; /* msgid of the request */ -+ ber_tag_t o_tag; /* tag of the request */ - struct timespec o_hr_time_rel; /* internal system time op initiated */ - struct timespec o_hr_time_utc; /* utc system time op initiated */ -- int o_isroot; /* requestor is manager */ -+ struct timespec o_hr_time_started_rel; /* internal system time op started */ -+ int o_isroot; /* requestor is manager */ - Slapi_DN o_sdn; /* dn bound when op was initiated */ -- char *o_authtype; /* auth method used to bind dn */ -+ char *o_authtype; /* auth method used to bind dn */ - int o_ssf; /* ssf for this operation (highest between SASL and TLS/SSL) */ -- int o_opid; /* id of this operation */ -+ int o_opid; /* id of this operation */ - PRUint64 o_connid; /* id of conn initiating this op; for logging only */ - void *o_handler_data; - result_handler o_result_handler; -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index 834a98742..8d9c3fa6a 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -8210,13 +8210,29 @@ void slapi_operation_time_elapsed(Slapi_Operation *o, struct timespec *elapsed); - */ - void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiated); - /** -- * Given an operation and a timeout, return a populate struct with the expiry -- * time of the operation suitable for checking with slapi_timespec_expire_check -+ * Given an operation, determine the time elapsed since the op -+ * was actually started. 
- * -- * \param Slapi_Operation o - the operation that is in progress -- * \param time_t timeout the seconds relative to operation initiation to expiry at. -- * \param struct timespec *expiry the timespec to popluate with the relative expiry. -+ * \param Slapi_Operation o - the operation which is inprogress -+ * \param struct timespec *elapsed - location where the time difference will be -+ * placed. -+ */ -+void slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed); -+/** -+ * Given an operation, determine the time elapsed that the op spent -+ * in the work queue before actually being dispatched to a worker thread -+ * -+ * \param Slapi_Operation o - the operation which is inprogress -+ * \param struct timespec *elapsed - location where the time difference will be -+ * placed. -+ */ -+void slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed); -+/** -+ * Set the time the operation actually started -+ * -+ * \param Slapi_Operation o - the operation which is inprogress - */ -+void slapi_operation_set_time_started(Slapi_Operation *o); - #endif - - /** --- -2.26.2 - diff --git a/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch b/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch new file mode 100644 index 0000000..067d06e --- /dev/null +++ b/SOURCES/0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch @@ -0,0 +1,50 @@ +From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 9 Dec 2020 09:52:08 -0500 +Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix + +Description: heap-use-after-free in slapi_be_getsuffix after disk + monitoring runs. This feature is freeing a list of + backends which it does not need to do. + +Fixes: https://github.com/389ds/389-ds-base/issues/4483 + +Reviewed by: firstyear & tbordaz(Thanks!!) 
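The use-after-free described above comes down to an ownership rule: the disk-monitoring pass borrows the server's backend list and should release only what it allocated itself (the directory list). The C fix simply removes the slapi_be_free() loop; the sketch below restates that owned-versus-borrowed distinction in Python with hypothetical names, purely as an illustration.

    import shutil

    class Backend:
        """Stand-in for the server-owned backend object."""
        def __init__(self, name, directory):
            self.name = name
            self.directory = directory

    class Server:
        def __init__(self):
            self._backends = [Backend("userRoot", "/tmp")]

        def backends(self):
            # Borrowed references: the server keeps using these objects.
            return list(self._backends)

    def disk_monitor_pass(server, threshold_mb=64):
        backends = server.backends()               # borrowed, not owned
        dirs = [be.directory for be in backends]   # owned: built by this pass

        low = [d for d in dirs
               if shutil.disk_usage(d).free // (1024 * 1024) < threshold_mb]

        # Correct cleanup: release only what this pass created.
        dirs = None
        # Incorrect (the pattern the fix removes): tearing down borrowed objects
        # that the rest of the server still references.
        # for be in backends:
        #     be.release()
        return low

    print(disk_monitor_pass(Server()))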
+--- + ldap/servers/slapd/daemon.c | 13 +------------ + 1 file changed, 1 insertion(+), 12 deletions(-) + +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 49199e4df..691f77570 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + now = start; + while ((now - start) < grace_period) { + if (g_get_shutdown()) { +- be_index = 0; +- if (be_list[be_index] != NULL) { +- while ((be = be_list[be_index++])) { +- slapi_be_free(&be); +- } +- } + slapi_ch_array_free(dirs); + dirs = NULL; + return; +@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + } + } + } +- be_index = 0; +- if (be_list[be_index] != NULL) { +- while ((be = be_list[be_index++])) { +- slapi_be_free(&be); +- } +- } ++ + slapi_ch_array_free(dirs); + dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */ + g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL); +-- +2.26.2 + diff --git a/SOURCES/0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch b/SOURCES/0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch deleted file mode 100644 index 56b0db4..0000000 --- a/SOURCES/0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch +++ /dev/null @@ -1,31 +0,0 @@ -From ec1714c81290a03ae9aa5fd10acf3e9be71596d7 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 11 Jun 2020 15:47:43 -0400 -Subject: [PATCH] Issue 50912 - pwdReset can be modified by a user - -Description: The attribute "pwdReset" should only be allowed to be set by the - server. Update schema definition to include NO-USER-MODIFICATION - -relates: https://pagure.io/389-ds-base/issue/50912 - -Reviewed by: mreynolds(one line commit rule) ---- - ldap/schema/02common.ldif | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif -index 966636bef..c6dc074db 100644 ---- a/ldap/schema/02common.ldif -+++ b/ldap/schema/02common.ldif -@@ -76,7 +76,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2349 NAME ( 'passwordDictCheck' 'pwdDict - attributeTypes: ( 2.16.840.1.113730.3.1.2350 NAME ( 'passwordDictPath' 'pwdDictPath' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2351 NAME ( 'passwordUserAttributes' 'pwdUserAttributes' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.2352 NAME ( 'passwordBadWords' 'pwdBadWords' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) --attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE USAGE directoryOperation X-ORIGIN '389 Directory Server' ) -+attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN '389 Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.198 NAME 'memberURL' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape 
Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.199 NAME 'memberCertificateDescription' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' ) - attributeTypes: ( 2.16.840.1.113730.3.1.207 NAME 'vlvBase' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' ) --- -2.26.2 - diff --git a/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch b/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch new file mode 100644 index 0000000..9acd229 --- /dev/null +++ b/SOURCES/0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch @@ -0,0 +1,65 @@ +From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Wed, 16 Dec 2020 16:30:28 +0100 +Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491) + +Bug description: + If the bind entry does not exist, the bind result info + reports that 'No such entry'. It should not give any + information if the target entry exists or not + +Fix description: + Does not return any additional information during a bind + +relates: https://github.com/389ds/389-ds-base/issues/4480 + +Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all) + +Platforms tested: F31 +--- + dirsrvtests/tests/suites/basic/basic_test.py | 1 - + ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +- + ldap/servers/slapd/result.c | 2 +- + 3 files changed, 2 insertions(+), 3 deletions(-) + +diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py +index 120207321..1ae82dcdd 100644 +--- a/dirsrvtests/tests/suites/basic/basic_test.py ++++ b/dirsrvtests/tests/suites/basic/basic_test.py +@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance): + assert not dscreate_long_instance.exists() + + +- + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index 3fe86d567..10cef250f 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)), + if (attrs) { + for (size_t i = 0; attrs[i]; i++) { + if (ldbm_config_moved_attr(attrs[i])) { +- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); ++ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry"); + break; + } + } +diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c +index 9daf3b151..ab0d79454 100644 +--- a/ldap/servers/slapd/result.c ++++ b/ldap/servers/slapd/result.c +@@ -355,7 +355,7 @@ send_ldap_result_ext( + if (text) { + pbtext = text; + } else { +- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext); ++ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext); + } + + if (operation == NULL) { +-- +2.26.2 + diff --git a/SOURCES/0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch b/SOURCES/0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch deleted file mode 100644 index 5f4f6a3..0000000 --- a/SOURCES/0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch +++ /dev/null @@ -1,202 +0,0 @@ -From a6a52365df26edd4f6b0028056395d943344d787 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: 
Thu, 11 Jun 2020 15:30:28 -0400 -Subject: [PATCH] Issue 50791 - Healthcheck should look for notes=A/F in access - log - -Description: Add checks for notes=A (fully unindexed search) and - notes=F (Unknown attribute in search filter) in the - current access log. - -relates: https://pagure.io/389-ds-base/issue/50791 - -Reviewed by: firstyear(Thanks!) ---- - src/lib389/lib389/cli_ctl/health.py | 4 +- - src/lib389/lib389/dirsrv_log.py | 72 +++++++++++++++++++++++++++-- - src/lib389/lib389/lint.py | 26 ++++++++++- - 3 files changed, 96 insertions(+), 6 deletions(-) - -diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py -index 6333a753a..89484a11b 100644 ---- a/src/lib389/lib389/cli_ctl/health.py -+++ b/src/lib389/lib389/cli_ctl/health.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -18,6 +18,7 @@ from lib389.monitor import MonitorDiskSpace - from lib389.replica import Replica, Changelog5 - from lib389.nss_ssl import NssSsl - from lib389.dseldif import FSChecks, DSEldif -+from lib389.dirsrv_log import DirsrvAccessLog - from lib389 import lint - from lib389 import plugins - from lib389._constants import DSRC_HOME -@@ -37,6 +38,7 @@ CHECK_OBJECTS = [ - Changelog5, - DSEldif, - NssSsl, -+ DirsrvAccessLog, - ] - - -diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py -index baac2a3c9..7bed4bb17 100644 ---- a/src/lib389/lib389/dirsrv_log.py -+++ b/src/lib389/lib389/dirsrv_log.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2016 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -9,12 +9,17 @@ - """Helpers for managing the directory server internal logs. - """ - -+import copy - import re - import gzip - from dateutil.parser import parse as dt_parse - from glob import glob - from lib389.utils import ensure_bytes -- -+from lib389._mapped_object_lint import DSLint -+from lib389.lint import ( -+ DSLOGNOTES0001, # Unindexed search -+ DSLOGNOTES0002, # Unknown attr in search filter -+) - - # Because many of these settings can change live, we need to check for certain - # attributes all the time. 
-@@ -35,7 +40,7 @@ MONTH_LOOKUP = { - } - - --class DirsrvLog(object): -+class DirsrvLog(DSLint): - """Class of functions to working with the various DIrectory Server logs - """ - def __init__(self, dirsrv): -@@ -189,6 +194,67 @@ class DirsrvAccessLog(DirsrvLog): - self.full_regexs = [self.prog_m1, self.prog_con, self.prog_discon] - self.result_regexs = [self.prog_notes, self.prog_repl, - self.prog_result] -+ @classmethod -+ def lint_uid(cls): -+ return 'logs' -+ -+ def _log_get_search_stats(self, conn, op): -+ lines = self.match(f".* conn={conn} op={op} SRCH base=.*") -+ if len(lines) != 1: -+ return None -+ -+ quoted_vals = re.findall('"([^"]*)"', lines[0]) -+ return { -+ 'base': quoted_vals[0], -+ 'filter': quoted_vals[1], -+ 'timestamp': re.findall('\[(.*)\]', lines[0])[0], -+ 'scope': lines[0].split(' scope=', 1)[1].split(' ',1)[0] -+ } -+ -+ def _lint_notes(self): -+ """ -+ Check for notes=A (fully unindexed searches), and -+ notes=F (unknown attribute in filter) -+ """ -+ for pattern, lint_report in [(".* notes=A", DSLOGNOTES0001), (".* notes=F", DSLOGNOTES0002)]: -+ lines = self.match(pattern) -+ if len(lines) > 0: -+ count = 0 -+ searches = [] -+ for line in lines: -+ if ' RESULT err=' in line: -+ # Looks like a valid notes=A/F -+ conn = line.split(' conn=', 1)[1].split(' ',1)[0] -+ op = line.split(' op=', 1)[1].split(' ',1)[0] -+ etime = line.split(' etime=', 1)[1].split(' ',1)[0] -+ stats = self._log_get_search_stats(conn, op) -+ if stats is not None: -+ timestamp = stats['timestamp'] -+ base = stats['base'] -+ scope = stats['scope'] -+ srch_filter = stats['filter'] -+ count += 1 -+ if lint_report == DSLOGNOTES0001: -+ searches.append(f'\n [{count}] Unindexed Search\n' -+ f' - date: {timestamp}\n' -+ f' - conn/op: {conn}/{op}\n' -+ f' - base: {base}\n' -+ f' - scope: {scope}\n' -+ f' - filter: {srch_filter}\n' -+ f' - etime: {etime}\n') -+ else: -+ searches.append(f'\n [{count}] Invalid Attribute in Filter\n' -+ f' - date: {timestamp}\n' -+ f' - conn/op: {conn}/{op}\n' -+ f' - filter: {srch_filter}\n') -+ if len(searches) > 0: -+ report = copy.deepcopy(lint_report) -+ report['items'].append(self._get_log_path()) -+ report['detail'] = report['detail'].replace('NUMBER', str(count)) -+ for srch in searches: -+ report['detail'] += srch -+ yield report -+ - - def _get_log_path(self): - """Return the current log file location""" -diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py -index a103feec7..4b1700b92 100644 ---- a/src/lib389/lib389/lint.py -+++ b/src/lib389/lib389/lint.py -@@ -1,5 +1,5 @@ - # --- BEGIN COPYRIGHT BLOCK --- --# Copyright (C) 2019 Red Hat, Inc. -+# Copyright (C) 2020 Red Hat, Inc. - # All rights reserved. - # - # License: GPL (version 3 or any later version). -@@ -253,7 +253,7 @@ can use the CLI tool "dsconf" to resolve the conflict. 
Here is an example: - - Remove conflict entry and keep only the original/counterpart entry: - -- # dsconf slapd-YOUR_INSTANCE repl-conflict remove -+ # dsconf slapd-YOUR_INSTANCE repl-conflict delete - - Replace the original/counterpart entry with the conflict entry: - -@@ -418,3 +418,25 @@ until the time issues have been resolved: - Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems - and find the paragraph "Too much time skew".""" - } -+ -+DSLOGNOTES0001 = { -+ 'dsle': 'DSLOGNOTES0001', -+ 'severity': 'Medium', -+ 'description': 'Unindexed Search', -+ 'items': ['Performance'], -+ 'detail': """Found NUMBER fully unindexed searches in the current access log. -+Unindexed searches can cause high CPU and slow down the entire server's performance.\n""", -+ 'fix': """Examine the searches that are unindexed, and either properly index the attributes -+in the filter, increase the nsslapd-idlistscanlimit, or stop using that filter.""" -+} -+ -+DSLOGNOTES0002 = { -+ 'dsle': 'DSLOGNOTES0002', -+ 'severity': 'Medium', -+ 'description': 'Unknown Attribute In Filter', -+ 'items': ['Possible Performance Impact'], -+ 'detail': """Found NUMBER searches in the current access log that are using an -+unknown attribute in the search filter.\n""", -+ 'fix': """Stop using this these unknown attributes in the filter, or add the schema -+to the server and make sure it's properly indexed.""" -+} --- -2.26.2 - diff --git a/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch b/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch new file mode 100644 index 0000000..6de8b9e --- /dev/null +++ b/SOURCES/0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch @@ -0,0 +1,108 @@ +From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Wed, 16 Dec 2020 16:21:35 +0100 +Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor + (#4505) + +(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2) +--- + .../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------ + 1 file changed, 36 insertions(+), 14 deletions(-) + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index b03d170c8..eb18d2da2 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -9,6 +9,7 @@ + import time + import subprocess + import pytest ++import re + + from lib389.cli_conf.replication import get_repl_monitor_info + from lib389.tasks import * +@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No + log.info('Reset log file') + f.truncate(0) + ++def get_hostnames_from_log(port1, port2): ++ # Get the supplier host names as displayed in replication monitor output ++ with open(LOG_FILE, 'r') as logfile: ++ logtext = logfile.read() ++ # search for Supplier :hostname:port ++ # and use \D to insure there is no more number is after ++ # the matched port (i.e that 10 is not matching 101) ++ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m1 = 'localhost.localdomain' ++ if (match is not None): ++ host_m1 = match.group(2) ++ # Same for master 2 ++ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m2 
= 'localhost.localdomain' ++ if (match is not None): ++ host_m2 = match.group(2) ++ return (host_m1, host_m2) + + @pytest.mark.ds50545 + @pytest.mark.bz1739718 +@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + +- alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', +- 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] +- + connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) + content_list = ['Replica Root: dc=example,dc=com', + 'Replica ID: 1', +@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + '001', + m1.host + ':' + str(m1.port)] + +- dsrc_content = '[repl-monitor-connections]\n' \ +- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- '\n' \ +- '[repl-monitor-aliases]\n' \ +- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ +- 'M2 = ' + m2.host + ':' + str(m2.port) +- + connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] + +- aliases = ['M1=' + m1.host + ':' + str(m1.port), +- 'M2=' + m2.host + ':' + str(m2.port)] +- + args = FakeArgs() + args.connections = connections + args.aliases = None +@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + + log.info('Run replication monitor with connections option') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + ++ # Prepare the data for next tests ++ aliases = ['M1=' + host_m1 + ':' + str(m1.port), ++ 'M2=' + host_m2 + ':' + str(m2.port)] ++ ++ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', ++ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] ++ ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + host_m2 + ':' + str(m2.port) ++ + log.info('Run replication monitor with aliases option') + args.aliases = aliases + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +-- +2.26.2 + diff --git a/SOURCES/0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch b/SOURCES/0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch deleted file mode 100644 index d2663da..0000000 --- a/SOURCES/0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 2844d4ad90cbbd23ae75309e50ae4d7145586bb7 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 10 Jun 2020 14:07:24 -0400 -Subject: [PATCH] Issue 51144 - dsctl fails with instance names that contain - slapd- - -Bug Description: If an instance name contains 'slapd-' the CLI breaks: - - slapd-test-slapd - -Fix Description: Only strip off "slapd-" from the front of the instance - name. - -relates: https://pagure.io/389-ds-base/issue/51144 - -Reviewed by: firstyear(Thanks!) 
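The point of this fix is that only a single leading "slapd-" prefix should be removed from the instance name, never an occurrence elsewhere in the string. A rough Python sketch of the intended behaviour; the helper name and the sample instance names are invented for illustration and are not taken from the patch:

    def strip_serverid_prefix(serverid):
        # Remove one leading "slapd-" prefix and leave the rest of the name alone.
        if serverid.startswith("slapd-"):
            return serverid.replace("slapd-", "", 1)
        return serverid

    assert strip_serverid_prefix("slapd-localhost") == "localhost"
    assert strip_serverid_prefix("my-slapd-backup") == "my-slapd-backup"
    # An unguarded serverid.replace("slapd-", "") would turn "my-slapd-backup"
    # into "my-backup" and point the tooling at a dse.ldif path that does not exist.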
---- - src/lib389/lib389/__init__.py | 2 +- - src/lib389/lib389/dseldif.py | 3 ++- - 2 files changed, 3 insertions(+), 2 deletions(-) - -diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py -index 0ff1ab173..63d44b60a 100644 ---- a/src/lib389/lib389/__init__.py -+++ b/src/lib389/lib389/__init__.py -@@ -710,7 +710,7 @@ class DirSrv(SimpleLDAPObject, object): - # Don't need a default value now since it's set in init. - if serverid is None and hasattr(self, 'serverid'): - serverid = self.serverid -- elif serverid is not None: -+ elif serverid is not None and serverid.startswith('slapd-'): - serverid = serverid.replace('slapd-', '', 1) - - if self.serverid is None: -diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py -index 96c9af9d1..f2725add9 100644 ---- a/src/lib389/lib389/dseldif.py -+++ b/src/lib389/lib389/dseldif.py -@@ -40,7 +40,8 @@ class DSEldif(DSLint): - if serverid: - # Get the dse.ldif from the instance name - prefix = os.environ.get('PREFIX', ""), -- serverid = serverid.replace("slapd-", "") -+ if serverid.startswith("slapd-"): -+ serverid = serverid.replace("slapd-", "", 1) - self.path = "{}/etc/dirsrv/slapd-{}/dse.ldif".format(prefix[0], serverid) - else: - ds_paths = Paths(self._instance.serverid, self._instance) --- -2.26.2 - diff --git a/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch b/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch new file mode 100644 index 0000000..6906b5c --- /dev/null +++ b/SOURCES/0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch @@ -0,0 +1,374 @@ +From d7b49259ff2f9e0295bbfeaf128369ed33421974 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Mon, 30 Nov 2020 15:28:05 +0000 +Subject: [PATCH 1/6] Issue 4418 - ldif2db - offline. Warn the user of skipped + entries + +Bug Description: During an ldif2db import entries that do not +conform to various constraints will be skipped and not imported. +On completition of an import with skipped entries, the server +returns a success exit code and logs the skipped entry detail to +the error logs. The success exit code could lead the user to +believe that all entries were successfully imported. + +Fix Description: If a skipped entry occurs during import, the +import will continue and a warning will be returned to the user. + +CLI tools for offline import updated to handle warning code. + +Test added to generate an incorrect ldif entry and perform an +import. 
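Because the skipped-entry condition is reported as a bit flag rather than a plain error code, a caller can tell a hard failure apart from an import that succeeded but skipped entries. A minimal sketch of that check; the TaskWarning values mirror the enum introduced by this patch, while report_import_result is purely illustrative:

    from enum import IntEnum

    class TaskWarning(IntEnum):
        WARN_UPGARDE_DN_FORMAT_ALL = 1 << 0
        WARN_UPGRADE_DN_FORMAT = 1 << 1
        WARN_UPGRADE_DN_FORMAT_SPACE = 1 << 2
        WARN_SKIPPED_IMPORT_ENTRY = 1 << 3

    def report_import_result(ret):
        # ret is the ldif2db return value: falsy on failure, otherwise True or a
        # warning bitmask when the import itself completed.
        if not ret:
            print("ldif2db failed")
        elif ret & TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
            print("ldif2db completed, but some entries were skipped")
        else:
            print("ldif2db successful")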
+ +Fixes: #4418 + +Reviewed by: Firstyear, droideck (Thanks) + +(cherry picked from commit a98fe54292e9b183a2163efbc7bdfe208d4abfb0) +--- + .../tests/suites/import/import_test.py | 54 ++++++++++++++++++- + .../slapd/back-ldbm/db-bdb/bdb_import.c | 22 ++++++-- + ldap/servers/slapd/main.c | 8 +++ + ldap/servers/slapd/pblock.c | 24 +++++++++ + ldap/servers/slapd/pblock_v3.h | 1 + + ldap/servers/slapd/slapi-private.h | 14 +++++ + src/lib389/lib389/__init__.py | 18 +++---- + src/lib389/lib389/_constants.py | 7 +++ + src/lib389/lib389/cli_ctl/dbtasks.py | 8 ++- + 9 files changed, 140 insertions(+), 16 deletions(-) + +diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py +index 3803ecf43..b47db96ed 100644 +--- a/dirsrvtests/tests/suites/import/import_test.py ++++ b/dirsrvtests/tests/suites/import/import_test.py +@@ -15,7 +15,7 @@ import pytest + import time + import glob + from lib389.topologies import topology_st as topo +-from lib389._constants import DEFAULT_SUFFIX ++from lib389._constants import DEFAULT_SUFFIX, TaskWarning + from lib389.dbgen import dbgen_users + from lib389.tasks import ImportTask + from lib389.index import Indexes +@@ -139,6 +139,38 @@ def _create_bogus_ldif(topo): + return import_ldif1 + + ++def _create_syntax_err_ldif(topo): ++ """ ++ Create an incorrect ldif entry that violates syntax check ++ """ ++ ldif_dir = topo.standalone.get_ldif_dir() ++ line1 = """dn: dc=example,dc=com ++objectClass: top ++objectClass: domain ++dc: example ++dn: ou=groups,dc=example,dc=com ++objectClass: top ++objectClass: organizationalUnit ++ou: groups ++dn: uid=JHunt,ou=groups,dc=example,dc=com ++objectClass: top ++objectClass: person ++objectClass: organizationalPerson ++objectClass: inetOrgPerson ++objectclass: inetUser ++cn: James Hunt ++sn: Hunt ++uid: JHunt ++givenName: ++""" ++ with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out: ++ out.write(f'{line1}') ++ os.chmod(out.name, 0o777) ++ out.close() ++ import_ldif1 = ldif_dir + '/syntax_err.ldif' ++ return import_ldif1 ++ ++ + def test_import_with_index(topo, _import_clean): + """ + Add an index, then import via cn=tasks +@@ -214,6 +246,26 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl + topo.standalone.start() + + ++def test_ldif2db_syntax_check(topo): ++ """ldif2db should return a warning when a skipped entry has occured. ++ :id: 85e75670-42c5-4062-9edc-7f117c97a06f ++ :setup: ++ 1. Standalone Instance ++ 2. Ldif entry that violates syntax check rule (empty givenname) ++ :steps: ++ 1. Create an ldif file which violates the syntax checking rule ++ 2. Stop the server and import ldif file with ldif2db ++ :expected results: ++ 1. 
ldif2db import returns a warning to signify skipped entries ++ """ ++ import_ldif1 = _create_syntax_err_ldif(topo) ++ # Import the offending LDIF data - offline ++ topo.standalone.stop() ++ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) ++ assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY ++ topo.standalone.start() ++ ++ + def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean): + """Report during startup if nsslapd-cachememsize is too small + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +index e7da0517f..1e4830e99 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +@@ -2563,7 +2563,7 @@ error: + slapi_task_dec_refcount(job->task); + } + import_all_done(job, ret); +- ret = 1; ++ ret |= WARN_UPGARDE_DN_FORMAT_ALL; + } else if (NEED_DN_NORM == ret) { + import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main", + "%s complete. %s needs upgradednformat.", +@@ -2572,7 +2572,7 @@ error: + slapi_task_dec_refcount(job->task); + } + import_all_done(job, ret); +- ret = 2; ++ ret |= WARN_UPGRADE_DN_FORMAT; + } else if (NEED_DN_NORM_SP == ret) { + import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main", + "%s complete. %s needs upgradednformat spaces.", +@@ -2581,7 +2581,7 @@ error: + slapi_task_dec_refcount(job->task); + } + import_all_done(job, ret); +- ret = 3; ++ ret |= WARN_UPGRADE_DN_FORMAT_SPACE; + } else { + ret = -1; + if (job->task != NULL) { +@@ -2600,6 +2600,11 @@ error: + import_all_done(job, ret); + } + ++ /* set task warning if there are no errors */ ++ if((!ret) && (job->skipped)) { ++ ret |= WARN_SKIPPED_IMPORT_ENTRY; ++ } ++ + /* This instance isn't busy anymore */ + instance_set_not_busy(job->inst); + +@@ -2637,6 +2642,7 @@ bdb_back_ldif2db(Slapi_PBlock *pb) + int total_files, i; + int up_flags = 0; + PRThread *thread = NULL; ++ int ret = 0; + + slapi_pblock_get(pb, SLAPI_BACKEND, &be); + if (be == NULL) { +@@ -2764,7 +2770,15 @@ bdb_back_ldif2db(Slapi_PBlock *pb) + } + + /* old style -- do it all synchronously (THIS IS GOING AWAY SOON) */ +- return import_main_offline((void *)job); ++ ret = import_main_offline((void *)job); ++ ++ /* no error just warning, reset ret */ ++ if(ret &= WARN_SKIPPED_IMPORT_ENTRY) { ++ slapi_pblock_set_task_warning(pb, WARN_SKIPPED_IMPORT_ENTRY); ++ ret = 0; ++ } ++ ++ return ret; + } + + struct _import_merge_thang +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 694375b22..104f6826c 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -2069,6 +2069,14 @@ slapd_exemode_ldif2db(struct main_config *mcfg) + plugin->plg_name); + return_value = -1; + } ++ ++ /* check for task warnings */ ++ if(!return_value) { ++ if((return_value = slapi_pblock_get_task_warning(pb))) { ++ slapi_log_err(SLAPI_LOG_INFO, "slapd_exemode_ldif2db","returning task warning: %d\n", return_value); ++ } ++ } ++ + slapi_pblock_destroy(pb); + charray_free(instances); + charray_free(mcfg->cmd_line_instance_names); +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index 454ea9cc3..1ad9d0399 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -28,12 +28,14 @@ + #define SLAPI_LDIF_DUMP_REPLICA 2003 + #define SLAPI_PWDPOLICY 2004 + #define SLAPI_PW_ENTRY 2005 ++#define SLAPI_TASK_WARNING 2006 + + /* Used for checking assertions about pblocks in some cases. 
*/ + #define SLAPI_HINT 9999 + + static PRLock *pblock_analytics_lock = NULL; + ++ + static PLHashNumber + hash_int_func(const void *key) + { +@@ -4315,6 +4317,28 @@ slapi_pblock_set_ldif_dump_replica(Slapi_PBlock *pb, int32_t dump_replica) + pb->pb_task->ldif_dump_replica = dump_replica; + } + ++int32_t ++slapi_pblock_get_task_warning(Slapi_PBlock *pb) ++{ ++#ifdef PBLOCK_ANALYTICS ++ pblock_analytics_record(pb, SLAPI_TASK_WARNING); ++#endif ++ if (pb->pb_task != NULL) { ++ return pb->pb_task->task_warning; ++ } ++ return 0; ++} ++ ++void ++slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warning) ++{ ++#ifdef PBLOCK_ANALYTICS ++ pblock_analytics_record(pb, SLAPI_TASK_WARNING); ++#endif ++ _pblock_assert_pb_task(pb); ++ pb->pb_task->task_warning = warning; ++} ++ + void * + slapi_pblock_get_vattr_context(Slapi_PBlock *pb) + { +diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h +index 90498c0b0..b35d78565 100644 +--- a/ldap/servers/slapd/pblock_v3.h ++++ b/ldap/servers/slapd/pblock_v3.h +@@ -67,6 +67,7 @@ typedef struct _slapi_pblock_task + int ldif2db_noattrindexes; + int ldif_printkey; + int task_flags; ++ int32_t task_warning; + int import_state; + + int server_running; /* indicate that server is running */ +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index c98c1947c..31cb33472 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -1465,6 +1465,20 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes); + void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag); + void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text); + ++/* task warnings */ ++typedef enum task_warning_t{ ++ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), ++ WARN_UPGRADE_DN_FORMAT = (1 << 1), ++ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), ++ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) ++} task_warning; ++ ++int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); ++void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); ++ ++ ++int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); ++ + #ifdef __cplusplus + } + #endif +diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py +index 4e6a1905a..5b36a79e1 100644 +--- a/src/lib389/lib389/__init__.py ++++ b/src/lib389/lib389/__init__.py +@@ -2683,7 +2683,7 @@ class DirSrv(SimpleLDAPObject, object): + # server is stopped) + # + def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt, +- import_file): ++ import_file, import_cl): + """ + @param bename - The backend name of the database to import + @param suffixes - List/tuple of suffixes to import +@@ -2731,14 +2731,14 @@ class DirSrv(SimpleLDAPObject, object): + try: + result = subprocess.check_output(cmd, encoding='utf-8') + except subprocess.CalledProcessError as e: +- self.log.debug("Command: %s failed with the return code %s and the error %s", +- format_cmd_list(cmd), e.returncode, e.output) +- return False +- +- self.log.debug("ldif2db output: BEGIN") +- for line in result.split("\n"): +- self.log.debug(line) +- self.log.debug("ldif2db output: END") ++ if e.returncode == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY: ++ self.log.debug("Command: %s skipped import entry warning %s", ++ format_cmd_list(cmd), e.returncode) ++ return e.returncode ++ else: ++ self.log.debug("Command: %s failed with the return code %s and the error %s", ++ format_cmd_list(cmd), e.returncode, 
e.output) ++ return False + + return True + +diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py +index e28c602a3..38ba04565 100644 +--- a/src/lib389/lib389/_constants.py ++++ b/src/lib389/lib389/_constants.py +@@ -162,6 +162,13 @@ DB2BAK = 'db2bak' + DB2INDEX = 'db2index' + DBSCAN = 'dbscan' + ++# Task warnings ++class TaskWarning(IntEnum): ++ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0) ++ WARN_UPGRADE_DN_FORMAT = (1 << 1) ++ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2) ++ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) ++ + RDN_REPLICA = "cn=replica" + + RETROCL_SUFFIX = "cn=changelog" +diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py +index 590a1ea0e..02830239c 100644 +--- a/src/lib389/lib389/cli_ctl/dbtasks.py ++++ b/src/lib389/lib389/cli_ctl/dbtasks.py +@@ -7,6 +7,7 @@ + # See LICENSE for details. + # --- END COPYRIGHT BLOCK --- + ++from lib389._constants import TaskWarning + + def dbtasks_db2index(inst, log, args): + if not inst.db2index(bename=args.backend): +@@ -44,10 +45,13 @@ def dbtasks_db2ldif(inst, log, args): + + + def dbtasks_ldif2db(inst, log, args): +- if not inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif, +- suffixes=None, excludeSuffixes=None): ++ ret = inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif, ++ suffixes=None, excludeSuffixes=None, import_cl=False) ++ if not ret: + log.fatal("ldif2db failed") + return False ++ elif ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY: ++ log.warn("ldif2db successful with skipped entries") + else: + log.info("ldif2db successful") + +-- +2.26.2 + diff --git a/SOURCES/0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch b/SOURCES/0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch deleted file mode 100644 index 8d25933..0000000 --- a/SOURCES/0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch +++ /dev/null @@ -1,520 +0,0 @@ -From 6cd4b1c60dbd3d7b74adb19a2434585d50553f39 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Fri, 5 Jun 2020 12:14:51 +0200 -Subject: [PATCH] Ticket 49859 - A distinguished value can be missing in an - entry - -Bug description: - According to RFC 4511 (see ticket), the values of the RDN attributes - should be present in an entry. - With a set of replicated operations, it is possible that those values - would be missing - -Fix description: - MOD and MODRDN update checks that the RDN values are presents. - If they are missing they are added to the resulting entry. In addition - the set of modifications to add those values are also indexed. - The specific case of single-valued attributes, where the final and unique value - can not be the RDN value, the attribute nsds5ReplConflict is added. 
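The invariant this fix restores is that every attribute value used in an entry's RDN is also present as a value of that attribute in the entry itself. A hedged Python sketch of that check, assuming python-ldap's ldap.dn.str2dn is available; the helper and its case handling are simplified for illustration only:

    from ldap.dn import str2dn

    def rdn_values_present(entry_dn, entry_attrs):
        # entry_attrs: dict mapping lower-cased attribute types to lists of values.
        for attr_type, attr_value, _flags in str2dn(entry_dn)[0]:
            if attr_value not in entry_attrs.get(attr_type.lower(), []):
                return False
        return True

    # After a replicated MODRDN to "employeenumber=foo2,...", the entry is expected
    # to carry employeenumber: foo2; when a more recent single value must win
    # instead, the entry is flagged with nsds5ReplConflict rather than silently
    # losing its RDN value.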
- -https://pagure.io/389-ds-base/issue/49859 - -Reviewed by: Mark Reynolds, William Brown - -Platforms tested: F31 ---- - .../replication/conflict_resolve_test.py | 174 +++++++++++++++++- - ldap/servers/slapd/back-ldbm/ldbm_modify.c | 136 ++++++++++++++ - ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 37 +++- - .../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 + - 4 files changed, 343 insertions(+), 5 deletions(-) - -diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py -index 99a072935..48d0067db 100644 ---- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py -+++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py -@@ -10,10 +10,11 @@ import time - import logging - import ldap - import pytest -+import re - from itertools import permutations - from lib389._constants import * - from lib389.idm.nscontainer import nsContainers --from lib389.idm.user import UserAccounts -+from lib389.idm.user import UserAccounts, UserAccount - from lib389.idm.group import Groups - from lib389.idm.organizationalunit import OrganizationalUnits - from lib389.replica import ReplicationManager -@@ -763,6 +764,177 @@ class TestTwoMasters: - user_dns_m2 = [user.dn for user in test_users_m2.list()] - assert set(user_dns_m1) == set(user_dns_m2) - -+ def test_conflict_attribute_multi_valued(self, topology_m2, base_m2): -+ """A RDN attribute being multi-valued, checks that after several operations -+ MODRDN and MOD_REPL its RDN values are the same on both servers -+ -+ :id: 225b3522-8ed7-4256-96f9-5fab9b7044a5 -+ :setup: Two master replication, -+ audit log, error log for replica and access log for internal -+ :steps: -+ 1. Create a test entry uid=user_test_1000,... -+ 2. Pause all replication agreements -+ 3. On M1 rename it into uid=foo1,... -+ 4. On M2 rename it into uid=foo2,... -+ 5. On M1 MOD_REPL uid:foo1 -+ 6. Resume all replication agreements -+ 7. Check that entry on M1 has uid=foo1, foo2 -+ 8. Check that entry on M2 has uid=foo1, foo2 -+ 9. Check that entry on M1 and M2 has the same uid values -+ :expectedresults: -+ 1. It should pass -+ 2. It should pass -+ 3. It should pass -+ 4. It should pass -+ 5. It should pass -+ 6. It should pass -+ 7. It should pass -+ 8. It should pass -+ 9. 
It should pass -+ """ -+ -+ M1 = topology_m2.ms["master1"] -+ M2 = topology_m2.ms["master2"] -+ -+ # add a test user -+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) -+ user_1 = test_users_m1.create_test_user(uid=1000) -+ test_users_m2 = UserAccount(M2, user_1.dn) -+ # Waiting fo the user to be replicated -+ for i in range(0,4): -+ time.sleep(1) -+ if test_users_m2.exists(): -+ break -+ assert(test_users_m2.exists()) -+ -+ # Stop replication agreements -+ topology_m2.pause_all_replicas() -+ -+ # On M1 rename test entry in uid=foo1 -+ original_dn = user_1.dn -+ user_1.rename('uid=foo1') -+ time.sleep(1) -+ -+ # On M2 rename test entry in uid=foo2 -+ M2.rename_s(original_dn, 'uid=foo2') -+ time.sleep(2) -+ -+ # on M1 MOD_REPL uid into foo1 -+ user_1.replace('uid', 'foo1') -+ -+ # resume replication agreements -+ topology_m2.resume_all_replicas() -+ time.sleep(5) -+ -+ # check that on M1, the entry 'uid' has two values 'foo1' and 'foo2' -+ final_dn = re.sub('^.*1000,', 'uid=foo2,', original_dn) -+ final_user_m1 = UserAccount(M1, final_dn) -+ for val in final_user_m1.get_attr_vals_utf8('uid'): -+ log.info("Check %s is on M1" % val) -+ assert(val in ['foo1', 'foo2']) -+ -+ # check that on M2, the entry 'uid' has two values 'foo1' and 'foo2' -+ final_user_m2 = UserAccount(M2, final_dn) -+ for val in final_user_m2.get_attr_vals_utf8('uid'): -+ log.info("Check %s is on M1" % val) -+ assert(val in ['foo1', 'foo2']) -+ -+ # check that the entry have the same uid values -+ for val in final_user_m1.get_attr_vals_utf8('uid'): -+ log.info("Check M1.uid %s is also on M2" % val) -+ assert(val in final_user_m2.get_attr_vals_utf8('uid')) -+ -+ for val in final_user_m2.get_attr_vals_utf8('uid'): -+ log.info("Check M2.uid %s is also on M1" % val) -+ assert(val in final_user_m1.get_attr_vals_utf8('uid')) -+ -+ def test_conflict_attribute_single_valued(self, topology_m2, base_m2): -+ """A RDN attribute being signle-valued, checks that after several operations -+ MODRDN and MOD_REPL its RDN values are the same on both servers -+ -+ :id: c38ae613-5d1e-47cf-b051-c7284e64b817 -+ :setup: Two master replication, test container for entries, enable plugin logging, -+ audit log, error log for replica and access log for internal -+ :steps: -+ 1. Create a test entry uid=user_test_1000,... -+ 2. Pause all replication agreements -+ 3. On M1 rename it into employeenumber=foo1,... -+ 4. On M2 rename it into employeenumber=foo2,... -+ 5. On M1 MOD_REPL employeenumber:foo1 -+ 6. Resume all replication agreements -+ 7. Check that entry on M1 has employeenumber=foo1 -+ 8. Check that entry on M2 has employeenumber=foo1 -+ 9. Check that entry on M1 and M2 has the same employeenumber values -+ :expectedresults: -+ 1. It should pass -+ 2. It should pass -+ 3. It should pass -+ 4. It should pass -+ 5. It should pass -+ 6. It should pass -+ 7. It should pass -+ 8. It should pass -+ 9. 
It should pass -+ """ -+ -+ M1 = topology_m2.ms["master1"] -+ M2 = topology_m2.ms["master2"] -+ -+ # add a test user with a dummy 'uid' extra value because modrdn removes -+ # uid that conflict with 'account' objectclass -+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) -+ user_1 = test_users_m1.create_test_user(uid=1000) -+ user_1.add('objectclass', 'extensibleobject') -+ user_1.add('uid', 'dummy') -+ test_users_m2 = UserAccount(M2, user_1.dn) -+ -+ # Waiting fo the user to be replicated -+ for i in range(0,4): -+ time.sleep(1) -+ if test_users_m2.exists(): -+ break -+ assert(test_users_m2.exists()) -+ -+ # Stop replication agreements -+ topology_m2.pause_all_replicas() -+ -+ # On M1 rename test entry in employeenumber=foo1 -+ original_dn = user_1.dn -+ user_1.rename('employeenumber=foo1') -+ time.sleep(1) -+ -+ # On M2 rename test entry in employeenumber=foo2 -+ M2.rename_s(original_dn, 'employeenumber=foo2') -+ time.sleep(2) -+ -+ # on M1 MOD_REPL uid into foo1 -+ user_1.replace('employeenumber', 'foo1') -+ -+ # resume replication agreements -+ topology_m2.resume_all_replicas() -+ time.sleep(5) -+ -+ # check that on M1, the entry 'employeenumber' has value 'foo1' -+ final_dn = re.sub('^.*1000,', 'employeenumber=foo2,', original_dn) -+ final_user_m1 = UserAccount(M1, final_dn) -+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'): -+ log.info("Check %s is on M1" % val) -+ assert(val in ['foo1']) -+ -+ # check that on M2, the entry 'employeenumber' has values 'foo1' -+ final_user_m2 = UserAccount(M2, final_dn) -+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'): -+ log.info("Check %s is on M2" % val) -+ assert(val in ['foo1']) -+ -+ # check that the entry have the same uid values -+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'): -+ log.info("Check M1.uid %s is also on M2" % val) -+ assert(val in final_user_m2.get_attr_vals_utf8('employeenumber')) -+ -+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'): -+ log.info("Check M2.uid %s is also on M1" % val) -+ assert(val in final_user_m1.get_attr_vals_utf8('employeenumber')) - - class TestThreeMasters: - def test_nested_entries(self, topology_m3, base_m3): -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -index e9d7e87e3..a507f3c31 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -@@ -213,6 +213,112 @@ error: - return retval; - } - -+int32_t -+entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret) -+{ -+ unsigned long op_type = SLAPI_OPERATION_NONE; -+ char *new_rdn = NULL; -+ char **dns = NULL; -+ char **rdns = NULL; -+ Slapi_Mods *smods = NULL; -+ char *type = NULL; -+ struct berval *bvp[2] = {0}; -+ struct berval bv; -+ Slapi_Attr *attr = NULL; -+ const char *entry_dn = NULL; -+ -+ *smods_ret = NULL; -+ entry_dn = slapi_entry_get_dn_const(entry); -+ /* Do not bother to check that RDN is present, no one rename RUV or change its nsuniqueid */ -+ if (strcasestr(entry_dn, RUV_STORAGE_ENTRY_UNIQUEID)) { -+ return 0; -+ } -+ -+ /* First get the RDNs of the operation */ -+ slapi_pblock_get(pb, SLAPI_OPERATION_TYPE, &op_type); -+ switch (op_type) { -+ case SLAPI_OPERATION_MODIFY: -+ dns = slapi_ldap_explode_dn(entry_dn, 0); -+ if (dns == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods", -+ "Fails to split DN \"%s\" into components\n", entry_dn); -+ return -1; -+ } -+ rdns = slapi_ldap_explode_rdn(dns[0], 0); -+ 
slapi_ldap_value_free(dns); -+ -+ break; -+ case SLAPI_OPERATION_MODRDN: -+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &new_rdn); -+ rdns = slapi_ldap_explode_rdn(new_rdn, 0); -+ break; -+ default: -+ break; -+ } -+ if (rdns == NULL || rdns[0] == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods", -+ "Fails to split RDN \"%s\" into components\n", slapi_entry_get_dn_const(entry)); -+ return -1; -+ } -+ -+ /* Update the entry to add RDNs values if they are missing */ -+ smods = slapi_mods_new(); -+ -+ bvp[0] = &bv; -+ bvp[1] = NULL; -+ for (size_t rdns_count = 0; rdns[rdns_count]; rdns_count++) { -+ Slapi_Value *value; -+ attr = NULL; -+ slapi_rdn2typeval(rdns[rdns_count], &type, &bv); -+ -+ /* Check if the RDN value exists */ -+ if ((slapi_entry_attr_find(entry, type, &attr) != 0) || -+ (slapi_attr_value_find(attr, &bv))) { -+ const CSN *csn_rdn_add; -+ const CSN *adcsn = attr_get_deletion_csn(attr); -+ -+ /* It is missing => adds it */ -+ if (slapi_attr_flag_is_set(attr, SLAPI_ATTR_FLAG_SINGLE)) { -+ if (csn_compare(adcsn, csn) >= 0) { -+ /* this is a single valued attribute and the current value -+ * (that is different from RDN value) is more recent than -+ * the RDN value we want to apply. -+ * Keep the current value and add a conflict flag -+ */ -+ -+ type = ATTR_NSDS5_REPLCONFLICT; -+ bv.bv_val = "RDN value may be missing because it is single-valued"; -+ bv.bv_len = strlen(bv.bv_val); -+ slapi_entry_add_string(entry, type, bv.bv_val); -+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp); -+ continue; -+ } -+ } -+ /* if a RDN value needs to be forced, make sure it csn is ahead */ -+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp); -+ csn_rdn_add = csn_max(adcsn, csn); -+ -+ if (entry_apply_mods_wsi(entry, smods, csn_rdn_add, repl_op)) { -+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods", -+ "Fails to set \"%s\" in \"%s\"\n", type, slapi_entry_get_dn_const(entry)); -+ slapi_ldap_value_free(rdns); -+ slapi_mods_free(&smods); -+ return -1; -+ } -+ /* Make the RDN value a distinguished value */ -+ attr_value_find_wsi(attr, &bv, &value); -+ value_update_csn(value, CSN_TYPE_VALUE_DISTINGUISHED, csn_rdn_add); -+ } -+ } -+ slapi_ldap_value_free(rdns); -+ if (smods->num_mods == 0) { -+ /* smods_ret already NULL, just free the useless smods */ -+ slapi_mods_free(&smods); -+ } else { -+ *smods_ret = smods; -+ } -+ return 0; -+} - /** - Apply the mods to the ec entry. Check for syntax, schema problems. - Check for abandon. -@@ -269,6 +375,8 @@ modify_apply_check_expand( - goto done; - } - -+ -+ - /* - * If the objectClass attribute type was modified in any way, expand - * the objectClass values to reflect the inheritance hierarchy. -@@ -414,6 +522,7 @@ ldbm_back_modify(Slapi_PBlock *pb) - int result_sent = 0; - int32_t parent_op = 0; - struct timespec parent_time; -+ Slapi_Mods *smods_add_rdn = NULL; - - slapi_pblock_get(pb, SLAPI_BACKEND, &be); - slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); -@@ -731,6 +840,15 @@ ldbm_back_modify(Slapi_PBlock *pb) - } - } /* else if new_mod_count == mod_count then betxnpremod plugin did nothing */ - -+ /* time to check if applying a replicated operation removed -+ * the RDN value from the entry. Assuming that only replicated update -+ * can lead to that bad result -+ */ -+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, repl_op, &smods_add_rdn)) { -+ goto error_return; -+ } -+ -+ - /* - * Update the ID to Entry index. 
- * Note that id2entry_add replaces the entry, so the Entry ID -@@ -764,6 +882,23 @@ ldbm_back_modify(Slapi_PBlock *pb) - MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count); - goto error_return; - } -+ -+ if (smods_add_rdn && slapi_mods_get_num_mods(smods_add_rdn) > 0) { -+ retval = index_add_mods(be, (LDAPMod **) slapi_mods_get_ldapmods_byref(smods_add_rdn), e, ec, &txn); -+ if (DB_LOCK_DEADLOCK == retval) { -+ /* Abort and re-try */ -+ slapi_mods_free(&smods_add_rdn); -+ continue; -+ } -+ if (retval != 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify", -+ "index_add_mods (rdn) failed, err=%d %s\n", -+ retval, (msg = dblayer_strerror(retval)) ? msg : ""); -+ MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count); -+ slapi_mods_free(&smods_add_rdn); -+ goto error_return; -+ } -+ } - /* - * Remove the old entry from the Virtual List View indexes. - * Add the new entry to the Virtual List View indexes. -@@ -978,6 +1113,7 @@ error_return: - - common_return: - slapi_mods_done(&smods); -+ slapi_mods_free(&smods_add_rdn); - - if (inst) { - if (ec_locked || cache_is_in_cache(&inst->inst_cache, ec)) { -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -index fde83c99f..e97b7a5f6 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -@@ -21,7 +21,7 @@ static void moddn_unlock_and_return_entry(backend *be, struct backentry **target - static int moddn_newrdn_mods(Slapi_PBlock *pb, const char *olddn, struct backentry *ec, Slapi_Mods *smods_wsi, int is_repl_op); - static IDList *moddn_get_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, struct backentry *parententry, Slapi_DN *parentdn, struct backentry ***child_entries, struct backdn ***child_dns, int is_resurect_operation); - static int moddn_rename_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, IDList *children, Slapi_DN *dn_parentdn, Slapi_DN *dn_newsuperiordn, struct backentry *child_entries[]); --static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3); -+static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4); - static void mods_remove_nsuniqueid(Slapi_Mods *smods); - - #define MOD_SET_ERROR(rc, error, count) \ -@@ -100,6 +100,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb) - Connection *pb_conn = NULL; - int32_t parent_op = 0; - struct timespec parent_time; -+ Slapi_Mods *smods_add_rdn = NULL; - - if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { - conn_id = 0; /* connection is NULL */ -@@ -842,6 +843,15 @@ ldbm_back_modrdn(Slapi_PBlock *pb) - goto error_return; - } - } -+ -+ /* time to check if applying a replicated operation removed -+ * the RDN value from the entry. Assuming that only replicated update -+ * can lead to that bad result -+ */ -+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, is_replicated_operation, &smods_add_rdn)) { -+ goto error_return; -+ } -+ - /* check that the entry still obeys the schema */ - if (slapi_entry_schema_check(pb, ec->ep_entry) != 0) { - ldap_result_code = LDAP_OBJECT_CLASS_VIOLATION; -@@ -1003,7 +1013,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb) - /* - * Update the indexes for the entry. 
- */ -- retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi); -+ retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi, smods_add_rdn); - if (DB_LOCK_DEADLOCK == retval) { - /* Retry txn */ - continue; -@@ -1497,6 +1507,7 @@ common_return: - slapi_mods_done(&smods_operation_wsi); - slapi_mods_done(&smods_generated); - slapi_mods_done(&smods_generated_wsi); -+ slapi_mods_free(&smods_add_rdn); - slapi_ch_free((void **)&child_entries); - slapi_ch_free((void **)&child_dns); - if (ldap_result_matcheddn && 0 != strcmp(ldap_result_matcheddn, "NULL")) -@@ -1778,7 +1789,7 @@ mods_remove_nsuniqueid(Slapi_Mods *smods) - * mods contains the list of attribute change made. - */ - static int --modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3) -+modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4) - { - backend *be; - ldbm_instance *inst; -@@ -1874,6 +1885,24 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm - goto error_return; - } - } -+ if (smods4 != NULL && slapi_mods_get_num_mods(smods4) > 0) { -+ /* -+ * update the indexes: lastmod, rdn, etc. -+ */ -+ retval = index_add_mods(be, slapi_mods_get_ldapmods_byref(smods4), e, *ec, ptxn); -+ if (DB_LOCK_DEADLOCK == retval) { -+ /* Retry txn */ -+ slapi_log_err(SLAPI_LOG_BACKLDBM, "modrdn_rename_entry_update_indexes", -+ "index_add_mods4 deadlock\n"); -+ goto error_return; -+ } -+ if (retval != 0) { -+ slapi_log_err(SLAPI_LOG_TRACE, "modrdn_rename_entry_update_indexes", -+ "index_add_mods 4 failed, err=%d %s\n", -+ retval, (msg = dblayer_strerror(retval)) ? msg : ""); -+ goto error_return; -+ } -+ } - /* - * Remove the old entry from the Virtual List View indexes. - * Add the new entry to the Virtual List View indexes. -@@ -1991,7 +2020,7 @@ moddn_rename_child_entry( - * Update all the indexes. - */ - retval = modrdn_rename_entry_update_indexes(ptxn, pb, li, e, ec, -- smodsp, NULL, NULL); -+ smodsp, NULL, NULL, NULL); - /* JCMREPL - Should the children get updated modifiersname and lastmodifiedtime? 
*/ - slapi_mods_done(&smods); - } -diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -index 4d2524fd9..e2f1100ed 100644 ---- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -@@ -324,6 +324,7 @@ int get_parent_rdn(DB *db, ID parentid, Slapi_RDN *srdn); - /* - * modify.c - */ -+int32_t entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret); - int modify_update_all(backend *be, Slapi_PBlock *pb, modify_context *mc, back_txn *txn); - void modify_init(modify_context *mc, struct backentry *old_entry); - int modify_apply_mods(modify_context *mc, Slapi_Mods *smods); --- -2.26.2 - diff --git a/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch b/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch new file mode 100644 index 0000000..6e77682 --- /dev/null +++ b/SOURCES/0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch @@ -0,0 +1,52 @@ +From 97bdef2d562e447d521202beb485c3948b0e7214 Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Mon, 30 Nov 2020 15:28:05 +0000 +Subject: [PATCH 2/6] Issue 4418 - ldif2db - offline. Warn the user of skipped + entries + +Bug Description: During an ldif2db import entries that do not +conform to various constraints will be skipped and not imported. +On completition of an import with skipped entries, the server +returns a success exit code and logs the skipped entry detail to +the error logs. The success exit code could lead the user to +believe that all entries were successfully imported. + +Fix Description: If a skipped entry occurs during import, the +import will continue and a warning will be returned to the user. + +CLI tools for offline import updated to handle warning code. + +Test added to generate an incorrect ldif entry and perform an +import. 
+ +Fixes: #4418 + +Reviewed by: Firstyear, droideck (Thanks) +--- + ldap/servers/slapd/slapi-private.h | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index 31cb33472..e0092d571 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -1476,6 +1476,16 @@ typedef enum task_warning_t{ + int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); + void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); + ++/* task warnings */ ++typedef enum task_warning_t{ ++ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), ++ WARN_UPGRADE_DN_FORMAT = (1 << 1), ++ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), ++ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) ++} task_warning; ++ ++int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); ++void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); + + int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); + +-- +2.26.2 + diff --git a/SOURCES/0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch b/SOURCES/0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch deleted file mode 100644 index 2e20c8c..0000000 --- a/SOURCES/0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch +++ /dev/null @@ -1,128 +0,0 @@ -From 2be9d1b4332d3b9b55a2d285e9610813100e235f Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Mon, 22 Jun 2020 17:49:10 -0400 -Subject: [PATCH] Issue 49256 - log warning when thread number is very - different from autotuned value - -Description: To help prevent customers from setting incorrect values for - the thread number it would be useful to warn them that the - configured value is either way too low or way too high. - -relates: https://pagure.io/389-ds-base/issue/49256 - -Reviewed by: firstyear(Thanks!) ---- - .../tests/suites/config/autotuning_test.py | 28 +++++++++++++++ - ldap/servers/slapd/libglobs.c | 34 ++++++++++++++++++- - ldap/servers/slapd/slap.h | 3 ++ - 3 files changed, 64 insertions(+), 1 deletion(-) - -diff --git a/dirsrvtests/tests/suites/config/autotuning_test.py b/dirsrvtests/tests/suites/config/autotuning_test.py -index d1c751444..540761250 100644 ---- a/dirsrvtests/tests/suites/config/autotuning_test.py -+++ b/dirsrvtests/tests/suites/config/autotuning_test.py -@@ -43,6 +43,34 @@ def test_threads_basic(topo): - assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0 - - -+def test_threads_warning(topo): -+ """Check that we log a warning if the thread number is too high or low -+ -+ :id: db92412b-2812-49de-84b0-00f452cd254f -+ :setup: Standalone Instance -+ :steps: -+ 1. Get autotuned thread number -+ 2. Set threads way higher than hw threads, and find a warning in the log -+ 3. Set threads way lower than hw threads, and find a warning in the log -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. 
Success -+ """ -+ topo.standalone.config.set("nsslapd-threadnumber", "-1") -+ autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") -+ -+ topo.standalone.config.set("nsslapd-threadnumber", str(int(autotuned_value) * 4)) -+ time.sleep(.5) -+ assert topo.standalone.ds_error_log.match('.*higher.*hurt server performance.*') -+ -+ if int(autotuned_value) > 1: -+ # If autotuned is 1, there isn't anything to test here -+ topo.standalone.config.set("nsslapd-threadnumber", "1") -+ time.sleep(.5) -+ assert topo.standalone.ds_error_log.match('.*lower.*hurt server performance.*') -+ -+ - @pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid')) - def test_threads_invalid_value(topo, invalid_value): - """Check nsslapd-threadnumber for an invalid values -diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c -index fbf90d92d..88676a303 100644 ---- a/ldap/servers/slapd/libglobs.c -+++ b/ldap/servers/slapd/libglobs.c -@@ -4374,6 +4374,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a - { - int retVal = LDAP_SUCCESS; - int32_t threadnum = 0; -+ int32_t hw_threadnum = 0; - char *endp = NULL; - - slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); -@@ -4386,8 +4387,39 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a - threadnum = strtol(value, &endp, 10); - - /* Means we want to re-run the hardware detection. */ -+ hw_threadnum = util_get_hardware_threads(); - if (threadnum == -1) { -- threadnum = util_get_hardware_threads(); -+ threadnum = hw_threadnum; -+ } else { -+ /* -+ * Log a message if the user defined thread number is very different -+ * from the hardware threads as this is probably not the optimal -+ * value. -+ */ -+ if (threadnum >= hw_threadnum) { -+ if (threadnum > MIN_THREADS && threadnum / hw_threadnum >= 4) { -+ /* We're over the default minimum and way higher than the hw -+ * threads. */ -+ slapi_log_err(SLAPI_LOG_NOTICE, "config_set_threadnumber", -+ "The configured thread number (%d) is significantly " -+ "higher than the number of hardware threads (%d). " -+ "This can potentially hurt server performance. If " -+ "you are unsure how to tune \"nsslapd-threadnumber\" " -+ "then set it to \"-1\" and the server will tune it " -+ "according to the system hardware\n", -+ threadnum, hw_threadnum); -+ } -+ } else if (threadnum < MIN_THREADS) { -+ /* The thread number should never be less than the minimum and -+ * hardware threads. */ -+ slapi_log_err(SLAPI_LOG_WARNING, "config_set_threadnumber", -+ "The configured thread number (%d) is lower than the number " -+ "of hardware threads (%d). This will hurt server performance. " -+ "If you are unsure how to tune \"nsslapd-threadnumber\" then " -+ "set it to \"-1\" and the server will tune it according to the " -+ "system hardware\n", -+ threadnum, hw_threadnum); -+ } - } - - if (*endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535) { -diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h -index 8e76393c3..894efd29c 100644 ---- a/ldap/servers/slapd/slap.h -+++ b/ldap/servers/slapd/slap.h -@@ -403,6 +403,9 @@ typedef void (*VFPV)(); /* takes undefined arguments */ - #define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE 0 - #define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE_STR "0" - -+#define MIN_THREADS 16 -+#define MAX_THREADS 512 -+ - - /* Default password values. 
*/ - --- -2.26.2 - diff --git a/SOURCES/0022-Fix-cherry-pick-erorr.patch b/SOURCES/0022-Fix-cherry-pick-erorr.patch new file mode 100644 index 0000000..a078160 --- /dev/null +++ b/SOURCES/0022-Fix-cherry-pick-erorr.patch @@ -0,0 +1,34 @@ +From 22fb8b2690a5fa364d252846f06b77b5fec8c602 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 7 Jan 2021 10:27:43 -0500 +Subject: [PATCH 3/6] Fix cherry-pick erorr + +--- + ldap/servers/slapd/slapi-private.h | 11 ----------- + 1 file changed, 11 deletions(-) + +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index e0092d571..d5abe8ac1 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -1476,17 +1476,6 @@ typedef enum task_warning_t{ + int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); + void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); + +-/* task warnings */ +-typedef enum task_warning_t{ +- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), +- WARN_UPGRADE_DN_FORMAT = (1 << 1), +- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), +- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) +-} task_warning; +- +-int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); +-void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); +- + int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name); + + #ifdef __cplusplus +-- +2.26.2 + diff --git a/SOURCES/0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch b/SOURCES/0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch deleted file mode 100644 index 94c3f34..0000000 --- a/SOURCES/0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch +++ /dev/null @@ -1,34 +0,0 @@ -From d24381488a997dda0006b603fb2b452b726757c0 Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Thu, 25 Jun 2020 10:45:16 +0200 -Subject: [PATCH] Issue 51188 - db2ldif crashes when LDIF file can't be - accessed - -Bug Description: db2ldif crashes when we set '-a LDIF_PATH' to a place that -can't be accessed by the user (dirsrv by default) - -Fix Description: Don't attempt to close DB if we bail after a failed -attempt to open LDIF file. - -https://pagure.io/389-ds-base/issue/51188 - -Reviewed by: mreynolds (Thanks!) 
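The crash described here came from the error path still trying to close the database after the output LDIF file could not be opened. A loose Python analogue of the guarded-cleanup pattern the one-line fix enforces; every name below is invented for illustration, the real change simply clears we_start_the_backends before bailing out:

    def export_to_ldif(open_output, start_backends, stop_backends, write_entries):
        backends_started = False
        try:
            out = open_output()      # can fail with EACCES, as in the bug report
            start_backends()
            backends_started = True
            write_entries(out)
            return 0
        except OSError:
            return -1
        finally:
            if backends_started:     # only undo work that actually happened
                stop_backends()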
---- - ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c -index 542147c3d..9ffd877cb 100644 ---- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c -+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c -@@ -871,6 +871,7 @@ bdb_db2ldif(Slapi_PBlock *pb) - slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif", - "db2ldif: %s: can't open %s: %d (%s) while running as user \"%s\"\n", - inst->inst_name, fname, errno, dblayer_strerror(errno), slapdFrontendConfig->localuserinfo->pw_name); -+ we_start_the_backends = 0; - return_value = -1; - goto bye; - } --- -2.26.2 - diff --git a/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch b/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch new file mode 100644 index 0000000..81e2612 --- /dev/null +++ b/SOURCES/0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch @@ -0,0 +1,393 @@ +From 43f8a317bcd9040874b27cad905347a9e6bc8a6f Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Wed, 9 Dec 2020 22:42:59 +0000 +Subject: [PATCH 4/6] Issue 4419 - Warn users of skipped entries during ldif2db + online import (#4476) + +Bug Description: During an online ldif2db import entries that do not + conform to various constraints will be skipped and + not imported. On completition of an import with skipped + entries, the server responds with a success message + and logs the skipped entry detail to the error logs. + The success messgae could lead the user to believe + that all entries were successfully imported. + +Fix Description: If a skipped entry occurs during import, the import + will continue and a warning message will be displayed. + The schema is extended with a nsTaskWarning attribute + which is used to capture and retrieve any task + warnings. + + CLI tools for online import updated. + + Test added to generate an incorrect ldif entry and perform an + online import. 
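A hedged usage sketch of how a lib389 client could pick up the new warning after an online import, following the test added in this patch; it assumes these patches are applied (get_task_warn and TaskWarning only exist with them) and that the instance and LDIF path arguments are valid:

    from lib389.tasks import ImportTask
    from lib389._constants import DEFAULT_SUFFIX, TaskWarning

    def run_online_import(inst, ldif_path):
        task = ImportTask(inst)
        task.import_suffix_from_ldif(ldiffile=ldif_path, suffix=DEFAULT_SUFFIX)
        task.wait(timeout=30)
        warn = task.get_task_warn()
        if warn and (warn & TaskWarning.WARN_SKIPPED_IMPORT_ENTRY):
            # The task finished, but nsTaskWarning reports skipped entries.
            print("import completed with skipped entries; check the error log")
        return task.get_exit_code()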
+ +Fixes: https://github.com/389ds/389-ds-base/issues/4419 + +Reviewed by: tbordaz, mreynolds389, droideck, Firstyear (Thanks) +--- + .../tests/suites/import/import_test.py | 39 +++++++++++++++++-- + ldap/schema/02common.ldif | 3 +- + .../back-ldbm/db-bdb/bdb_import_threads.c | 5 +++ + ldap/servers/slapd/slap.h | 1 + + ldap/servers/slapd/slapi-plugin.h | 11 ++++++ + ldap/servers/slapd/slapi-private.h | 8 ---- + ldap/servers/slapd/task.c | 29 +++++++++++++- + src/lib389/lib389/cli_conf/backend.py | 6 ++- + src/lib389/lib389/tasks.py | 23 +++++++++-- + 9 files changed, 108 insertions(+), 17 deletions(-) + +diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py +index b47db96ed..77c915026 100644 +--- a/dirsrvtests/tests/suites/import/import_test.py ++++ b/dirsrvtests/tests/suites/import/import_test.py +@@ -65,6 +65,9 @@ def _import_clean(request, topo): + import_ldif = ldif_dir + '/basic_import.ldif' + if os.path.exists(import_ldif): + os.remove(import_ldif) ++ syntax_err_ldif = ldif_dir + '/syntax_err.dif' ++ if os.path.exists(syntax_err_ldif): ++ os.remove(syntax_err_ldif) + + request.addfinalizer(finofaci) + +@@ -141,17 +144,19 @@ def _create_bogus_ldif(topo): + + def _create_syntax_err_ldif(topo): + """ +- Create an incorrect ldif entry that violates syntax check ++ Create an ldif file, which contains an entry that violates syntax check + """ + ldif_dir = topo.standalone.get_ldif_dir() + line1 = """dn: dc=example,dc=com + objectClass: top + objectClass: domain + dc: example ++ + dn: ou=groups,dc=example,dc=com + objectClass: top + objectClass: organizationalUnit + ou: groups ++ + dn: uid=JHunt,ou=groups,dc=example,dc=com + objectClass: top + objectClass: person +@@ -201,6 +206,34 @@ def test_import_with_index(topo, _import_clean): + assert f'{place}/userRoot/roomNumber.db' in glob.glob(f'{place}/userRoot/*.db', recursive=True) + + ++def test_online_import_with_warning(topo, _import_clean): ++ """ ++ Import an ldif file with syntax errors, verify skipped entry warning code ++ ++ :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8 ++ :setup: Standalone Instance ++ :steps: ++ 1. Create standalone Instance ++ 2. Create an ldif file with an entry that violates syntax check (empty givenname) ++ 3. Online import of troublesome ldif file ++ :expected results: ++ 1. Successful import with skipped entry warning ++ """ ++ topo.standalone.restart() ++ ++ import_task = ImportTask(topo.standalone) ++ import_ldif1 = _create_syntax_err_ldif(topo) ++ ++ # Importing the offending ldif file - online ++ import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX) ++ ++ # There is just a single entry in this ldif ++ import_task.wait(5) ++ ++ # Check for the task nsTaskWarning attr, make sure its set to skipped entry code ++ assert import_task.present('nstaskwarning') ++ assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn() ++ + def test_crash_on_ldif2db(topo, _import_clean): + """ + Delete the cn=monitor entry for an LDBM backend instance. Doing this will +@@ -246,7 +279,7 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl + topo.standalone.start() + + +-def test_ldif2db_syntax_check(topo): ++def test_ldif2db_syntax_check(topo, _import_clean): + """ldif2db should return a warning when a skipped entry has occured. 
+ :id: 85e75670-42c5-4062-9edc-7f117c97a06f + :setup: +@@ -261,7 +294,7 @@ def test_ldif2db_syntax_check(topo): + import_ldif1 = _create_syntax_err_ldif(topo) + # Import the offending LDIF data - offline + topo.standalone.stop() +- ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) ++ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1, None) + assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY + topo.standalone.start() + +diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif +index c6dc074db..821640d03 100644 +--- a/ldap/schema/02common.ldif ++++ b/ldap/schema/02common.ldif +@@ -145,6 +145,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2356 NAME 'nsTaskExitCode' DESC 'Slapi T + attributeTypes: ( 2.16.840.1.113730.3.1.2357 NAME 'nsTaskCurrentItem' DESC 'Slapi Task item' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2358 NAME 'nsTaskTotalItems' DESC 'Slapi Task total items' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) + attributeTypes: ( 2.16.840.1.113730.3.1.2359 NAME 'nsTaskCreated' DESC 'Slapi Task creation date' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) ++attributeTypes: ( 2.16.840.1.113730.3.1.2375 NAME 'nsTaskWarning' DESC 'Slapi Task warning code' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' ) + # + # objectclasses: + # +@@ -177,5 +178,5 @@ objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement + objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' ) + objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' ) + objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' ) +-objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated ) X-ORIGIN '389 Directory Server' ) ++objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated $ nsTaskWarning ) X-ORIGIN '389 Directory Server' ) + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +index 310893884..5c7d9c8f7 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +@@ -747,6 +747,11 @@ import_producer(void *param) + } + } + ++ /* capture skipped entry warnings for this task */ ++ if((job) && (job->skipped)) { ++ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY); ++ } ++ + slapi_value_free(&(job->usn_value)); + import_free_ldif(&c); + info->state = FINISHED; +diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h +index 53c9161d1..be4d38739 100644 +--- a/ldap/servers/slapd/slap.h ++++ b/ldap/servers/slapd/slap.h +@@ -1753,6 +1753,7 @@ typedef struct slapi_task + int task_progress; /* number between 0 
and task_work */ + int task_work; /* "units" of work to be done */ + int task_flags; /* (see above) */ ++ task_warning task_warn; /* task warning */ + char *task_status; /* transient status info */ + char *task_log; /* appended warnings, etc */ + char task_date[SLAPI_TIMESTAMP_BUFSIZE]; /* Date/time when task was created */ +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 96313ef2c..ddb11bc7c 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -6638,6 +6638,15 @@ int slapi_config_remove_callback(int operation, int flags, const char *base, int + /* task flags (set by the task-control code) */ + #define SLAPI_TASK_DESTROYING 0x01 /* queued event for destruction */ + ++/* task warnings */ ++typedef enum task_warning_t{ ++ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), ++ WARN_UPGRADE_DN_FORMAT = (1 << 1), ++ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), ++ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) ++} task_warning; ++ ++ + int slapi_task_register_handler(const char *name, dseCallbackFn func); + int slapi_plugin_task_register_handler(const char *name, dseCallbackFn func, Slapi_PBlock *plugin_pb); + int slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func); +@@ -6654,6 +6663,8 @@ int slapi_task_get_refcount(Slapi_Task *task); + void slapi_task_set_destructor_fn(Slapi_Task *task, TaskCallbackFn func); + void slapi_task_set_cancel_fn(Slapi_Task *task, TaskCallbackFn func); + void slapi_task_status_changed(Slapi_Task *task); ++void slapi_task_set_warning(Slapi_Task *task, task_warning warn); ++int slapi_task_get_warning(Slapi_Task *task); + void slapi_task_log_status(Slapi_Task *task, char *format, ...) + #ifdef __GNUC__ + __attribute__((format(printf, 2, 3))); +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index d5abe8ac1..b956ebe63 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -1465,14 +1465,6 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes); + void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag); + void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text); + +-/* task warnings */ +-typedef enum task_warning_t{ +- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0), +- WARN_UPGRADE_DN_FORMAT = (1 << 1), +- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2), +- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3) +-} task_warning; +- + int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb); + void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn); + +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 936c64920..806077a16 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -46,6 +46,7 @@ static uint64_t shutting_down = 0; + #define TASK_PROGRESS_NAME "nsTaskCurrentItem" + #define TASK_WORK_NAME "nsTaskTotalItems" + #define TASK_DATE_NAME "nsTaskCreated" ++#define TASK_WARNING_NAME "nsTaskWarning" + + #define DEFAULT_TTL "3600" /* seconds */ + #define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */ +@@ -332,7 +333,7 @@ slapi_task_status_changed(Slapi_Task *task) + LDAPMod modlist[20]; + LDAPMod *mod[20]; + int cur = 0, i; +- char s1[20], s2[20], s3[20]; ++ char s1[20], s2[20], s3[20], s4[20]; + + if (shutting_down) { + /* don't care about task status updates anymore */ +@@ -346,9 +347,11 @@ slapi_task_status_changed(Slapi_Task *task) + sprintf(s1, "%d", task->task_exitcode); + sprintf(s2, "%d", 
task->task_progress); + sprintf(s3, "%d", task->task_work); ++ sprintf(s4, "%d", task->task_warn); + NEXTMOD(TASK_PROGRESS_NAME, s2); + NEXTMOD(TASK_WORK_NAME, s3); + NEXTMOD(TASK_DATE_NAME, task->task_date); ++ NEXTMOD(TASK_WARNING_NAME, s4); + /* only add the exit code when the job is done */ + if ((task->task_state == SLAPI_TASK_FINISHED) || + (task->task_state == SLAPI_TASK_CANCELLED)) { +@@ -452,6 +455,30 @@ slapi_task_get_refcount(Slapi_Task *task) + return 0; /* return value not currently used */ + } + ++/* ++ * Return task warning ++ */ ++int ++slapi_task_get_warning(Slapi_Task *task) ++{ ++ if (task) { ++ return task->task_warn; ++ } ++ ++ return 0; /* return value not currently used */ ++} ++ ++/* ++ * Set task warning ++ */ ++void ++slapi_task_set_warning(Slapi_Task *task, task_warning warn) ++{ ++ if (task) { ++ return task->task_warn |= warn; ++ } ++} ++ + int + slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func) + { +diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py +index d7a6e670c..6bfbcb036 100644 +--- a/src/lib389/lib389/cli_conf/backend.py ++++ b/src/lib389/lib389/cli_conf/backend.py +@@ -243,9 +243,13 @@ def backend_import(inst, basedn, log, args): + exclude_suffixes=args.exclude_suffixes) + task.wait(timeout=None) + result = task.get_exit_code() ++ warning = task.get_task_warn() + + if task.is_complete() and result == 0: +- log.info("The import task has finished successfully") ++ if warning is None or (warning == 0): ++ log.info("The import task has finished successfully") ++ else: ++ log.info("The import task has finished successfully, with warning code {}, check the logs for more detail".format(warning)) + else: + raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log()))) + +diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py +index dc7bb9206..bf20d1e61 100644 +--- a/src/lib389/lib389/tasks.py ++++ b/src/lib389/lib389/tasks.py +@@ -38,6 +38,7 @@ class Task(DSLdapObject): + self._protected = False + self._exit_code = None + self._task_log = "" ++ self._task_warn = None + + def status(self): + """Return the decoded status of the task +@@ -49,6 +50,7 @@ class Task(DSLdapObject): + + self._exit_code = self.get_attr_val_utf8("nsTaskExitCode") + self._task_log = self.get_attr_val_utf8("nsTaskLog") ++ self._task_warn = self.get_attr_val_utf8("nsTaskWarning") + if not self.exists(): + self._log.debug("complete: task has self cleaned ...") + # The task cleaned it self up. 
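A minimal sketch of how the nsTaskWarning plumbing added by this patch might be consumed from lib389 once it is applied, assuming a Task object `task` for an already-created import task (how that object is obtained is outside this patch); wait(), get_exit_code() and get_task_warn() are the methods shown in the hunks here, and WARN_SKIPPED_IMPORT_ENTRY is the flag value defined in the slapi-plugin.h hunk above -- this is an illustration, not code taken from the patch:

    # Sketch only: check both the exit code and the new warning code of a task.
    # Assumes `task` is a lib389 Task object wrapping a finished import task.
    task.wait(timeout=None)           # blocks until nsTaskExitCode is written
    result = task.get_exit_code()     # 0 means the task itself succeeded
    warning = task.get_task_warn()    # new accessor: nsTaskWarning as int, or None
    if result == 0:
        if warning:                   # e.g. WARN_SKIPPED_IMPORT_ENTRY == (1 << 3)
            print("import finished with warning code {}".format(warning))
        else:
            print("import finished cleanly")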
+@@ -77,6 +79,15 @@ class Task(DSLdapObject): + return None + return None + ++ def get_task_warn(self): ++ """Return task's warning code if task is complete, else None.""" ++ if self.is_complete(): ++ try: ++ return int(self._task_warn) ++ except TypeError: ++ return None ++ return None ++ + def wait(self, timeout=120): + """Wait until task is complete.""" + +@@ -390,14 +401,17 @@ class Tasks(object): + running, true if done - if true, second is the exit code - if dowait + is True, this function will block until the task is complete''' + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', +- 'nsTaskCurrentItem', 'nsTaskTotalItems'] ++ 'nsTaskCurrentItem', 'nsTaskTotalItems', 'nsTaskWarning'] + done = False + exitCode = 0 ++ warningCode = 0 + dn = entry.dn + while not done: + entry = self.conn.getEntry(dn, attrlist=attrlist) + self.log.debug("task entry %r", entry) + ++ if entry.nsTaskWarning: ++ warningCode = int(entry.nsTaskWarning) + if entry.nsTaskExitCode: + exitCode = int(entry.nsTaskExitCode) + done = True +@@ -405,7 +419,7 @@ class Tasks(object): + time.sleep(1) + else: + break +- return (done, exitCode) ++ return (done, exitCode, warningCode) + + def importLDIF(self, suffix=None, benamebase=None, input_file=None, + args=None): +@@ -461,8 +475,9 @@ class Tasks(object): + self.conn.add_s(entry) + + exitCode = 0 ++ warningCode = 0 + if args and args.get(TASK_WAIT, False): +- (done, exitCode) = self.conn.tasks.checkTask(entry, True) ++ (done, exitCode, warningCode) = self.conn.tasks.checkTask(entry, True) + + if exitCode: + self.log.error("Error: import task %s for file %s exited with %d", +@@ -470,6 +485,8 @@ class Tasks(object): + else: + self.log.info("Import task %s for file %s completed successfully", + cn, input_file) ++ if warningCode: ++ self.log.info("with warning code %d", warningCode) + self.dn = dn + self.entry = entry + return exitCode +-- +2.26.2 + diff --git a/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch b/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch new file mode 100644 index 0000000..8f90863 --- /dev/null +++ b/SOURCES/0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch @@ -0,0 +1,149 @@ +From 61d82ef842e0e4e013937bf05d7f640be2d2fc09 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Wed, 16 Dec 2020 16:30:28 +0100 +Subject: [PATCH 5/6] Issue 4480 - Unexpected info returned to ldap request + (#4491) + +Bug description: + If the bind entry does not exist, the bind result info + reports that 'No such entry'. 
It should not give any + information if the target entry exists or not + +Fix description: + Does not return any additional information during a bind + +relates: https://github.com/389ds/389-ds-base/issues/4480 + +Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all) + +Platforms tested: F31 +--- + dirsrvtests/tests/suites/basic/basic_test.py | 112 +++++++++++++++++++ + 1 file changed, 112 insertions(+) + +diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py +index 1ae82dcdd..02b73ee85 100644 +--- a/dirsrvtests/tests/suites/basic/basic_test.py ++++ b/dirsrvtests/tests/suites/basic/basic_test.py +@@ -1400,6 +1400,118 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance): + assert not dscreate_long_instance.exists() + + ++@pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value')) ++def dscreate_test_rdn_value(request): ++ template_file = "/tmp/dssetup.inf" ++ template_text = f"""[general] ++config_version = 2 ++# This invalid hostname ... ++full_machine_name = localhost.localdomain ++# Means we absolutely require this. ++strict_host_checking = False ++# In tests, we can be run in containers, NEVER trust ++# that systemd is there, or functional in any capacity ++systemd = False ++ ++[slapd] ++instance_name = test_different_rdn ++root_dn = cn=directory manager ++root_password = someLongPassword_123 ++# We do not have access to high ports in containers, ++# so default to something higher. ++port = 38999 ++secure_port = 63699 ++ ++[backend-userroot] ++create_suffix_entry = True ++suffix = {request.param} ++""" ++ ++ with open(template_file, "w") as template_fd: ++ template_fd.write(template_text) ++ ++ # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 ++ tmp_env = os.environ ++ if "PYTHONPATH" in tmp_env: ++ del tmp_env["PYTHONPATH"] ++ ++ def fin(): ++ os.remove(template_file) ++ if request.param != "wrong=some_value": ++ try: ++ subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it']) ++ except subprocess.CalledProcessError as e: ++ log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}") ++ else: ++ log.info("Wrong RDN is passed, instance not created") ++ request.addfinalizer(fin) ++ return template_file, tmp_env, request.param, ++ ++ ++@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'), ++ reason="This test is only required with new admin cli, and requires root.") ++@pytest.mark.bz1807419 ++@pytest.mark.ds50928 ++def test_dscreate_with_different_rdn(dscreate_test_rdn_value): ++ """Test that dscreate works with different RDN attributes as suffix ++ ++ :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef ++ :parametrized: yes ++ :setup: None ++ :steps: ++ 1. Create template file for dscreate with different RDN attributes as suffix ++ 2. Create instance using template file ++ 3. Create instance with 'wrong=some_value' as suffix's RDN attribute ++ :expectedresults: ++ 1. Should succeeds ++ 2. Should succeeds ++ 3. Should fail ++ """ ++ try: ++ subprocess.check_call([ ++ 'dscreate', ++ 'from-file', ++ dscreate_test_rdn_value[0] ++ ], env=dscreate_test_rdn_value[1]) ++ except subprocess.CalledProcessError as e: ++ log.fatal(f"dscreate failed! 
Error ({e.returncode}) {e.output}") ++ if dscreate_test_rdn_value[2] != "wrong=some_value": ++ assert False ++ else: ++ assert True ++ ++def test_bind_invalid_entry(topology_st): ++ """Test the failing bind does not return information about the entry ++ ++ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1: bind as non existing entry ++ 2: check that bind info does not report 'No such entry' ++ ++ :expectedresults: ++ 1: pass ++ 2: pass ++ """ ++ ++ topology_st.standalone.restart() ++ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX ++ try: ++ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) ++ except ldap.LDAPError as e: ++ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) ++ log.info('exception description: ' + e.args[0]['desc']) ++ if 'info' in e.args[0]: ++ log.info('exception info: ' + e.args[0]['info']) ++ assert e.args[0]['desc'] == 'Invalid credentials' ++ assert 'info' not in e.args[0] ++ pass ++ ++ log.info('test_bind_invalid_entry: PASSED') ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +-- +2.26.2 + diff --git a/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch b/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch new file mode 100644 index 0000000..1d3b1a9 --- /dev/null +++ b/SOURCES/0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch @@ -0,0 +1,99 @@ +From 3c74f736c657d007770fe866842b08d0a74772ca Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 9 Dec 2020 15:21:11 -0500 +Subject: [PATCH 6/6] Issue 4414 - disk monitoring - prevent division by zero + crash + +Bug Description: If a disk mount has zero total space or zero used + space then a division by zero can occur and the + server will crash. + + It has also been observed that sometimes a system + can return the wrong disk entirely, and when that + happens the incorrect disk also has zero available + space which triggers the disk monitioring thread to + immediately shut the server down. + +Fix Description: Check the total and used space for zero and do not + divide, just ignore it. As a preemptive measure + ignore disks from /dev, /proc, /sys (except /dev/shm). + Yes it's a bit hacky, but the true underlying cause + is not known yet. So better to be safe than sorry. + +Relates: https://github.com/389ds/389-ds-base/issues/4414 + +Reviewed by: firstyear(Thanks!) +--- + ldap/servers/slapd/daemon.c | 22 +++++++++++++++++++++- + ldap/servers/slapd/monitor.c | 13 +++++-------- + 2 files changed, 26 insertions(+), 9 deletions(-) + +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 691f77570..bfd965263 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -221,7 +221,27 @@ disk_mon_get_mount_point(char *dir) + } + if (s.st_dev == dev_id) { + endmntent(fp); +- return (slapi_ch_strdup(mnt->mnt_dir)); ++ ++ if ((strncmp(mnt->mnt_dir, "/dev", 4) == 0 && strncmp(mnt->mnt_dir, "/dev/shm", 8) != 0) || ++ strncmp(mnt->mnt_dir, "/proc", 4) == 0 || ++ strncmp(mnt->mnt_dir, "/sys", 4) == 0) ++ { ++ /* ++ * Ignore "mount directories" starting with /dev (except ++ * /dev/shm), /proc, /sys For some reason these mounts are ++ * occasionally/incorrectly returned. Only seen this at a ++ * customer site once. When it happens it causes disk ++ * monitoring to think the server has 0 disk space left, and ++ * it abruptly/unexpectedly shuts the server down. 
At this ++ * point it looks like a bug in stat(), setmntent(), or ++ * getmntent(), but there is no way to prove that since there ++ * is no way to reproduce the original issue. For now just ++ * return NULL to be safe. ++ */ ++ return NULL; ++ } else { ++ return (slapi_ch_strdup(mnt->mnt_dir)); ++ } + } + } + endmntent(fp); +diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c +index 562721bed..65f082986 100644 +--- a/ldap/servers/slapd/monitor.c ++++ b/ldap/servers/slapd/monitor.c +@@ -131,7 +131,6 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)), + { + int32_t rc = LDAP_SUCCESS; + char **dirs = NULL; +- char buf[BUFSIZ]; + struct berval val; + struct berval *vals[2]; + uint64_t total_space; +@@ -143,15 +142,13 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)), + + disk_mon_get_dirs(&dirs); + +- for (uint16_t i = 0; dirs && dirs[i]; i++) { ++ for (size_t i = 0; dirs && dirs[i]; i++) { ++ char buf[BUFSIZ] = {0}; + rc = disk_get_info(dirs[i], &total_space, &avail_space, &used_space); +- if (rc) { +- slapi_log_err(SLAPI_LOG_WARNING, "monitor_disk_info", +- "Unable to get 'cn=disk space,cn=monitor' stats for %s\n", dirs[i]); +- } else { ++ if (rc == 0 && total_space > 0 && used_space > 0) { + val.bv_len = snprintf(buf, sizeof(buf), +- "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"", +- dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space); ++ "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"", ++ dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space); + val.bv_val = buf; + attrlist_merge(&e->e_attrs, "dsDisk", vals); + } +-- +2.26.2 + diff --git a/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch b/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch new file mode 100644 index 0000000..fb3211a --- /dev/null +++ b/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch @@ -0,0 +1,132 @@ +From 48b30739f33d1eb526dbdd45c820129c4a4c4bcb Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Tue, 12 Jan 2021 11:06:24 +0100 +Subject: [PATCH] Issue 4504 - Insure ldapi is enabled in repl_monitor_test.py + (Needed on RHEL) (#4527) + +(cherry picked from commit 279556bc78ed743d7a053069621d999ec045866f) +--- + .../tests/suites/clu/repl_monitor_test.py | 67 +++++++++---------- + 1 file changed, 31 insertions(+), 36 deletions(-) + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index eb18d2da2..b2cb840b3 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -9,7 +9,6 @@ + import time + import subprocess + import pytest +-import re + + from lib389.cli_conf.replication import get_repl_monitor_info + from lib389.tasks import * +@@ -18,6 +17,8 @@ from lib389.topologies import topology_m2 + from lib389.cli_base import FakeArgs + from lib389.cli_base.dsrc import dsrc_arg_concat + from lib389.cli_base import connect_instance ++from lib389.replica import Replicas ++ + + pytestmark = pytest.mark.tier0 + +@@ -68,25 +69,6 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No + log.info('Reset log file') + f.truncate(0) + +-def get_hostnames_from_log(port1, port2): +- # Get the supplier host names as displayed in replication monitor output +- 
with open(LOG_FILE, 'r') as logfile: +- logtext = logfile.read() +- # search for Supplier :hostname:port +- # and use \D to insure there is no more number is after +- # the matched port (i.e that 10 is not matching 101) +- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' +- match=re.search(regexp, logtext) +- host_m1 = 'localhost.localdomain' +- if (match is not None): +- host_m1 = match.group(2) +- # Same for master 2 +- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' +- match=re.search(regexp, logtext) +- host_m2 = 'localhost.localdomain' +- if (match is not None): +- host_m2 = match.group(2) +- return (host_m1, host_m2) + + @pytest.mark.ds50545 + @pytest.mark.bz1739718 +@@ -115,6 +97,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + ++ # Enable ldapi if not already done. ++ for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]: ++ if not inst.can_autobind(): ++ # Update ns-slapd instance ++ inst.config.set('nsslapd-ldapilisten', 'on') ++ inst.config.set('nsslapd-ldapiautobind', 'on') ++ inst.restart() ++ # Ensure that updates have been sent both ways. ++ replicas = Replicas(m1) ++ replica = replicas.get(DEFAULT_SUFFIX) ++ replica.test_replication([m2]) ++ replicas = Replicas(m2) ++ replica = replicas.get(DEFAULT_SUFFIX) ++ replica.test_replication([m1]) ++ ++ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', ++ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] ++ + connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) + content_list = ['Replica Root: dc=example,dc=com', + 'Replica ID: 1', +@@ -177,9 +177,20 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + '001', + m1.host + ':' + str(m1.port)] + ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + m2.host + ':' + str(m2.port) ++ + connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] + ++ aliases = ['M1=' + m1.host + ':' + str(m1.port), ++ 'M2=' + m2.host + ':' + str(m2.port)] ++ + args = FakeArgs() + args.connections = connections + args.aliases = None +@@ -187,24 +198,8 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + + log.info('Run replication monitor with connections option') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +- (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + +- # Prepare the data for next tests +- aliases = ['M1=' + host_m1 + ':' + str(m1.port), +- 'M2=' + host_m2 + ':' + str(m2.port)] +- +- alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', +- 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] +- +- dsrc_content = '[repl-monitor-connections]\n' \ +- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- '\n' \ +- '[repl-monitor-aliases]\n' \ +- 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ +- 'M2 = ' + host_m2 + ':' + str(m2.port) +- + log.info('Run replication monitor with aliases option') + args.aliases = aliases + 
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +-- +2.26.2 + diff --git a/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch new file mode 100644 index 0000000..44636c8 --- /dev/null +++ b/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch @@ -0,0 +1,51 @@ +From f84e75de9176218d3b47a447d07fe8fb7ca3d72f Mon Sep 17 00:00:00 2001 +From: Barbora Simonova +Date: Mon, 11 Jan 2021 15:51:24 +0100 +Subject: [PATCH] Issue 4315 - performance search rate: nagle triggers high + rate of setsocketopt + +Description: +The config value of nsslapd-nagle is now set to 'off' by default. +Added a test case, that checks the value. + +Relates: https://github.com/389ds/389-ds-base/issues/4315 + +Reviewed by: droideck (Thanks!) +--- + .../tests/suites/config/config_test.py | 20 +++++++++++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py +index 38d1ed9ac..fda16a530 100644 +--- a/dirsrvtests/tests/suites/config/config_test.py ++++ b/dirsrvtests/tests/suites/config/config_test.py +@@ -41,6 +41,26 @@ def big_file(): + return TEMP_BIG_FILE + + ++@pytest.mark.bz1897248 ++@pytest.mark.ds4315 ++@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher") ++def test_nagle_default_value(topo): ++ """Test that nsslapd-nagle attribute is off by default ++ ++ :id: 00361f5d-d638-4d39-8231-66fa52637203 ++ :setup: Standalone instance ++ :steps: ++ 1. Create instance ++ 2. Check the value of nsslapd-nagle ++ :expectedresults: ++ 1. Success ++ 2. The value of nsslapd-nagle should be off ++ """ ++ ++ log.info('Check the value of nsslapd-nagle attribute is off by default') ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off' ++ ++ + def test_maxbersize_repl(topology_m2, big_file): + """maxbersize is ignored in the replicated operations. 
+ +-- +2.26.2 + diff --git a/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch b/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch new file mode 100644 index 0000000..ba8f9d2 --- /dev/null +++ b/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch @@ -0,0 +1,98 @@ +From 00ccec335792e3fa44712427463c64eb1ff9c5be Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Tue, 12 Jan 2021 17:45:41 +0100 +Subject: [PATCH] Issue 4504 - insure that repl_monitor_test use ldapi (for + RHEL) - fix merge issue (#4533) + +(cherry picked from commit a880fddc192414d6283ea6832491b7349e5471dc) +--- + .../tests/suites/clu/repl_monitor_test.py | 47 ++++++++++++++----- + 1 file changed, 36 insertions(+), 11 deletions(-) + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index b2cb840b3..caf6a9099 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -9,6 +9,7 @@ + import time + import subprocess + import pytest ++import re + + from lib389.cli_conf.replication import get_repl_monitor_info + from lib389.tasks import * +@@ -69,6 +70,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No + log.info('Reset log file') + f.truncate(0) + ++def get_hostnames_from_log(port1, port2): ++ # Get the supplier host names as displayed in replication monitor output ++ with open(LOG_FILE, 'r') as logfile: ++ logtext = logfile.read() ++ # search for Supplier :hostname:port ++ # and use \D to insure there is no more number is after ++ # the matched port (i.e that 10 is not matching 101) ++ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m1 = 'localhost.localdomain' ++ if (match is not None): ++ host_m1 = match.group(2) ++ # Same for master 2 ++ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m2 = 'localhost.localdomain' ++ if (match is not None): ++ host_m2 = match.group(2) ++ return (host_m1, host_m2) + + @pytest.mark.ds50545 + @pytest.mark.bz1739718 +@@ -177,20 +197,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + '001', + m1.host + ':' + str(m1.port)] + +- dsrc_content = '[repl-monitor-connections]\n' \ +- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- '\n' \ +- '[repl-monitor-aliases]\n' \ +- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ +- 'M2 = ' + m2.host + ':' + str(m2.port) +- + connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] + +- aliases = ['M1=' + m1.host + ':' + str(m1.port), +- 'M2=' + m2.host + ':' + str(m2.port)] +- + args = FakeArgs() + args.connections = connections + args.aliases = None +@@ -198,8 +207,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + + log.info('Run replication monitor with connections option') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + ++ # Prepare the data for next tests ++ aliases = ['M1=' + host_m1 + ':' + str(m1.port), ++ 'M2=' + host_m2 + ':' + str(m2.port)] ++ ++ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', ++ 'Supplier: 
M2 (' + host_m2 + ':' + str(m2.port) + ')'] ++ ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + host_m2 + ':' + str(m2.port) ++ + log.info('Run replication monitor with aliases option') + args.aliases = aliases + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +-- +2.26.2 + diff --git a/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch b/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch new file mode 100644 index 0000000..593e2cd --- /dev/null +++ b/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch @@ -0,0 +1,70 @@ +From 2afc65fd1750afcb1667545da5625f5a932aacdd Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Wed, 13 Jan 2021 15:16:08 +0100 +Subject: [PATCH] Issue 4528 - Fix cn=monitor SCOPE_ONE search (#4529) + +Bug Description: While doing a ldapsearch on "cn=monitor" is +throwing err=32 with -s one. + +Fix Description: 'cn=monitor' is not a real entry so we should not +trying to check if the searched suffix (cm=monitor or its children) +belongs to the searched backend. + +Fixes: #4528 + +Reviewed by: @mreynolds389 @Firstyear @tbordaz (Thanks!) +--- + ldap/servers/slapd/opshared.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c +index c0bc5dcd0..f5ed71144 100644 +--- a/ldap/servers/slapd/opshared.c ++++ b/ldap/servers/slapd/opshared.c +@@ -240,6 +240,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + int rc = 0; + int internal_op; + Slapi_DN *basesdn = NULL; ++ Slapi_DN monitorsdn = {0}; + Slapi_DN *sdn = NULL; + Slapi_Operation *operation = NULL; + Slapi_Entry *referral = NULL; +@@ -765,9 +766,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + } + } else { + /* be_suffix null means that we are searching the default backend +- * -> don't change the search parameters in pblock +- */ +- if (be_suffix != NULL) { ++ * -> don't change the search parameters in pblock ++ * Also, we skip this block for 'cn=monitor' search and its subsearches ++ * as they are done by callbacks from monitor.c */ ++ slapi_sdn_init_dn_byref(&monitorsdn, "cn=monitor"); ++ if (!((be_suffix == NULL) || slapi_sdn_issuffix(basesdn, &monitorsdn))) { + if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL)) { + /* one level searches + * - depending on the suffix of the backend we might have to +@@ -789,8 +792,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + } else if (slapi_sdn_issuffix(basesdn, be_suffix)) { + int tmp_scope = LDAP_SCOPE_ONELEVEL; + slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope); +- } else ++ } else { ++ slapi_sdn_done(&monitorsdn); + goto next_be; ++ } + } + + /* subtree searches : +@@ -811,7 +816,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + } + } + } +- ++ slapi_sdn_done(&monitorsdn); + slapi_pblock_set(pb, SLAPI_BACKEND, be); + slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); + slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL); +-- +2.26.2 + diff --git a/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch b/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch new file mode 100644 index 0000000..7133049 --- /dev/null +++ 
b/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch @@ -0,0 +1,3866 @@ +From 6969181628f2c664d5f82c89c15bbc0a2487e21f Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 19 Nov 2020 15:46:19 -0500 +Subject: [PATCH 1/2] Issue 4384 - Use MONOTONIC clock for all timing events + and conditions + +Bug Description: All of the server's event handling and replication were + based on REALTIME clocks, which can be influenced by the + system changing. This could causes massive delays, and + simply cause unexpected behavior. + +Fix Description: Move all condition variables to use pthread instead of NSPR + functions. Also make sure we use MONOTONIC clocks when we + get the current time when checking for timeouts and other + timed events. + +Relates: https://github.com/389ds/389-ds-base/issues/4384 + +Reviewed by: elkris, firstyear, and tbordaz (Thanks!!!) + +Apply firstyear's sugestions + +Apply Firstyear's other suggestions + +Apply Thierry's suggestions +--- + Makefile.am | 2 +- + .../tests/suites/plugins/entryusn_test.py | 3 + + ldap/servers/plugins/chainingdb/cb_add.c | 2 +- + ldap/servers/plugins/chainingdb/cb_compare.c | 2 +- + .../plugins/chainingdb/cb_conn_stateless.c | 16 +- + ldap/servers/plugins/chainingdb/cb_delete.c | 2 +- + ldap/servers/plugins/chainingdb/cb_instance.c | 3 +- + ldap/servers/plugins/chainingdb/cb_modify.c | 2 +- + ldap/servers/plugins/chainingdb/cb_modrdn.c | 2 +- + ldap/servers/plugins/chainingdb/cb_search.c | 8 +- + ldap/servers/plugins/cos/cos_cache.c | 4 +- + ldap/servers/plugins/dna/dna.c | 2 +- + ldap/servers/plugins/passthru/ptconn.c | 2 +- + ldap/servers/plugins/referint/referint.c | 85 +++++--- + ldap/servers/plugins/replication/repl5.h | 3 +- + .../plugins/replication/repl5_backoff.c | 4 +- + .../plugins/replication/repl5_connection.c | 12 +- + .../plugins/replication/repl5_inc_protocol.c | 91 ++++---- + .../plugins/replication/repl5_mtnode_ext.c | 3 +- + .../plugins/replication/repl5_prot_private.h | 6 +- + .../plugins/replication/repl5_replica.c | 10 +- + .../replication/repl5_replica_config.c | 197 +++++++++++------- + .../plugins/replication/repl5_tot_protocol.c | 71 ++++--- + ldap/servers/plugins/replication/repl_extop.c | 4 +- + .../plugins/replication/windows_connection.c | 2 +- + .../replication/windows_inc_protocol.c | 82 +++++--- + .../replication/windows_tot_protocol.c | 24 ++- + ldap/servers/plugins/retrocl/retrocl_trim.c | 2 +- + ldap/servers/plugins/roles/roles_cache.c | 4 +- + ldap/servers/plugins/sync/sync.h | 4 +- + ldap/servers/plugins/sync/sync_persist.c | 54 +++-- + .../slapd/back-ldbm/db-bdb/bdb_import.c | 49 ++--- + .../back-ldbm/db-bdb/bdb_import_threads.c | 29 +-- + .../back-ldbm/db-bdb/bdb_instance_config.c | 8 +- + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 129 +++++++----- + .../slapd/back-ldbm/db-bdb/bdb_layer.h | 10 +- + ldap/servers/slapd/back-ldbm/import.h | 6 +- + ldap/servers/slapd/connection.c | 88 ++++---- + ldap/servers/slapd/daemon.c | 64 ++++-- + ldap/servers/slapd/eventq.c | 132 ++++++++---- + ldap/servers/slapd/house.c | 58 ++++-- + ldap/servers/slapd/libmakefile | 2 +- + ldap/servers/slapd/psearch.c | 63 +++--- + ldap/servers/slapd/regex.c | 2 +- + ldap/servers/slapd/slapi-plugin.h | 7 + + .../slapd/{slapi2nspr.c => slapi2runtime.c} | 87 +++++--- + ldap/servers/slapd/task.c | 4 +- + ldap/servers/slapd/time.c | 10 +- + 48 files changed, 877 insertions(+), 579 deletions(-) + rename ldap/servers/slapd/{slapi2nspr.c => slapi2runtime.c} (69%) + +diff --git a/Makefile.am b/Makefile.am +index 
0e5f04f91..f7bf1c44c 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1455,7 +1455,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + ldap/servers/slapd/security_wrappers.c \ + ldap/servers/slapd/slapd_plhash.c \ + ldap/servers/slapd/slapi_counter.c \ +- ldap/servers/slapd/slapi2nspr.c \ ++ ldap/servers/slapd/slapi2runtime.c \ + ldap/servers/slapd/snmp_collator.c \ + ldap/servers/slapd/sort.c \ + ldap/servers/slapd/ssl.c \ +diff --git a/dirsrvtests/tests/suites/plugins/entryusn_test.py b/dirsrvtests/tests/suites/plugins/entryusn_test.py +index ad3d7f209..da0538f74 100644 +--- a/dirsrvtests/tests/suites/plugins/entryusn_test.py ++++ b/dirsrvtests/tests/suites/plugins/entryusn_test.py +@@ -6,9 +6,11 @@ + # See LICENSE for details. + # --- END COPYRIGHT BLOCK --- + # ++import os + import ldap + import logging + import pytest ++import time + from lib389._constants import DEFAULT_SUFFIX + from lib389.config import Config + from lib389.plugins import USNPlugin, MemberOfPlugin +@@ -211,6 +213,7 @@ def test_entryusn_after_repl_delete(topology_m2): + user_usn = user_1.get_attr_val_int('entryusn') + + user_1.delete() ++ time.sleep(1) # Gives a little time for tombstone creation to complete + + ts = tombstones.get(user_rdn) + ts_usn = ts.get_attr_val_int('entryusn') +diff --git a/ldap/servers/plugins/chainingdb/cb_add.c b/ldap/servers/plugins/chainingdb/cb_add.c +index a9f9c0f87..b7ae7267d 100644 +--- a/ldap/servers/plugins/chainingdb/cb_add.c ++++ b/ldap/servers/plugins/chainingdb/cb_add.c +@@ -130,7 +130,7 @@ chaining_back_add(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* Send LDAP operation to the remote host */ +diff --git a/ldap/servers/plugins/chainingdb/cb_compare.c b/ldap/servers/plugins/chainingdb/cb_compare.c +index 25dfa87b5..8d7fdd06b 100644 +--- a/ldap/servers/plugins/chainingdb/cb_compare.c ++++ b/ldap/servers/plugins/chainingdb/cb_compare.c +@@ -126,7 +126,7 @@ chaining_back_compare(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c +index 9beb459ef..a2003221e 100644 +--- a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c ++++ b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c +@@ -453,7 +453,7 @@ cb_get_connection(cb_conn_pool *pool, + conn->ld = ld; + conn->status = CB_CONNSTATUS_OK; + conn->refcount = 0; /* incremented below */ +- conn->opentime = slapi_current_utc_time(); ++ conn->opentime = slapi_current_rel_time_t(); + conn->ThreadId = PR_MyThreadId(); /* store the thread id */ + conn->next = NULL; + if (secure) { +@@ -488,7 +488,7 @@ cb_get_connection(cb_conn_pool *pool, + } + + if (!secure) +- slapi_wait_condvar(pool->conn.conn_list_cv, NULL); ++ slapi_wait_condvar_pt(pool->conn.conn_list_cv, pool->conn.conn_list_mutex, NULL); + + if (cb_debug_on()) { + slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, +@@ -639,7 +639,7 @@ cb_check_for_stale_connections(cb_conn_pool *pool) + slapi_lock_mutex(pool->conn.conn_list_mutex); + + if (connlifetime > 0) +- curtime = slapi_current_utc_time(); ++ curtime = slapi_current_rel_time_t(); + + if (pool->secure) { + myself = PR_ThreadSelf(); +@@ -860,7 +860,7 @@ 
cb_ping_farm(cb_backend_instance *cb, cb_outgoing_conn *cnx, time_t end_time) + if (cnx && (cnx->status != CB_CONNSTATUS_OK)) /* Known problem */ + return LDAP_SERVER_DOWN; + +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + if (end_time && ((now <= end_time) || (end_time < 0))) + return LDAP_SUCCESS; + +@@ -905,7 +905,7 @@ cb_update_failed_conn_cpt(cb_backend_instance *cb) + slapi_unlock_mutex(cb->monitor_availability.cpt_lock); + if (cb->monitor_availability.cpt >= CB_NUM_CONN_BEFORE_UNAVAILABILITY) { + /* we reach the limit of authorized failed connections => we setup the chaining BE state to unavailable */ +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); + cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); +@@ -938,7 +938,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) + time_t now; + if (cb->monitor_availability.farmserver_state == FARMSERVER_UNAVAILABLE) { + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + if (now >= cb->monitor_availability.unavailableTimeLimit) { + cb->monitor_availability.unavailableTimeLimit = now + CB_INFINITE_TIME; /* to be sure only one thread can do the test */ + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); +@@ -951,7 +951,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) + "cb_check_availability - ping the farm server and check if it's still unavailable"); + if (cb_ping_farm(cb, NULL, 0) != LDAP_SUCCESS) { /* farm still unavailable... Just change the timelimit */ + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); + cb_send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "FARM SERVER TEMPORARY UNAVAILABLE", 0, NULL); +@@ -961,7 +961,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) + } else { + /* farm is back !*/ + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + cb->monitor_availability.unavailableTimeLimit = now; /* the unavailable period is finished */ + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); + /* The farmer server state backs to FARMSERVER_AVAILABLE, but this already done in cb_ping_farm, and also the reset of cpt*/ +diff --git a/ldap/servers/plugins/chainingdb/cb_delete.c b/ldap/servers/plugins/chainingdb/cb_delete.c +index e76fb6b95..94f84b55d 100644 +--- a/ldap/servers/plugins/chainingdb/cb_delete.c ++++ b/ldap/servers/plugins/chainingdb/cb_delete.c +@@ -117,7 +117,7 @@ chaining_back_delete(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c +index cd5abb834..bc1864c1a 100644 +--- a/ldap/servers/plugins/chainingdb/cb_instance.c ++++ b/ldap/servers/plugins/chainingdb/cb_instance.c +@@ -1947,7 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), + * we can't call recursively into the 
DSE to do more adds, they'll + * silently fail. instead, schedule the adds to happen in 1 second. + */ +- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, time(NULL) + 1); ++ inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, ++ slapi_current_rel_time_t() + 1); + } + + /* Get the list of operational attrs defined in the schema */ +diff --git a/ldap/servers/plugins/chainingdb/cb_modify.c b/ldap/servers/plugins/chainingdb/cb_modify.c +index f81edf4a6..e53da9e40 100644 +--- a/ldap/servers/plugins/chainingdb/cb_modify.c ++++ b/ldap/servers/plugins/chainingdb/cb_modify.c +@@ -125,7 +125,7 @@ chaining_back_modify(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_modrdn.c b/ldap/servers/plugins/chainingdb/cb_modrdn.c +index 95a068be7..d648253c7 100644 +--- a/ldap/servers/plugins/chainingdb/cb_modrdn.c ++++ b/ldap/servers/plugins/chainingdb/cb_modrdn.c +@@ -129,7 +129,7 @@ chaining_back_modrdn(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c +index d47cbc8e4..ffc8f56f8 100644 +--- a/ldap/servers/plugins/chainingdb/cb_search.c ++++ b/ldap/servers/plugins/chainingdb/cb_search.c +@@ -236,7 +236,7 @@ chainingdb_build_candidate_list(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + rc = ldap_search_ext(ld, target, scope, filter, attrs, attrsonly, +@@ -503,7 +503,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + while (1) { +@@ -579,7 +579,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* The server sent one of the entries found by the search */ +@@ -611,7 +611,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + parse_rc = ldap_parse_reference(ctx->ld, res, &referrals, NULL, 1); +diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c +index eb9bd77f9..d404ff901 100644 +--- a/ldap/servers/plugins/cos/cos_cache.c ++++ b/ldap/servers/plugins/cos/cos_cache.c +@@ -346,7 +346,7 @@ cos_cache_init(void) + if (ret == 0) { + slapi_lock_mutex(start_lock); + while (!started) { +- while (slapi_wait_condvar(start_cond, NULL) == 0) ++ while (slapi_wait_condvar_pt(start_cond, start_lock, NULL) == 0) + ; + } + slapi_unlock_mutex(start_lock); +@@ -401,7 +401,7 @@ cos_cache_wait_on_change(void *arg __attribute__((unused))) + * thread notifies our condvar, and so we will not miss any + * notifications, including the shutdown notification. 
+ */ +- slapi_wait_condvar(something_changed, NULL); ++ slapi_wait_condvar_pt(something_changed, change_lock, NULL); + } else { + /* Something to do...do it below */ + } +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index 16c625bb0..1cb54580b 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -907,7 +907,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) + * performing the operation at this point when + * starting up would cause the change to not + * get changelogged. */ +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); + } else { + dna_update_config_event(0, NULL); +diff --git a/ldap/servers/plugins/passthru/ptconn.c b/ldap/servers/plugins/passthru/ptconn.c +index 49040f651..637d33843 100644 +--- a/ldap/servers/plugins/passthru/ptconn.c ++++ b/ldap/servers/plugins/passthru/ptconn.c +@@ -233,7 +233,7 @@ passthru_get_connection(PassThruServer *srvr, LDAP **ldp) + slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, + "... passthru_get_connection waiting for conn to free up\n"); + #endif +- slapi_wait_condvar(srvr->ptsrvr_connlist_cv, NULL); ++ slapi_wait_condvar_pt(srvr->ptsrvr_connlist_cv, srvr->ptsrvr_connlist_mutex, NULL); + + #ifdef PASSTHRU_VERBOSE_LOGGING + slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, +diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c +index eb4b089fb..fd5356d72 100644 +--- a/ldap/servers/plugins/referint/referint.c ++++ b/ldap/servers/plugins/referint/referint.c +@@ -71,8 +71,9 @@ void referint_get_config(int *delay, char **logfile); + /* global thread control stuff */ + static PRLock *referint_mutex = NULL; + static PRThread *referint_tid = NULL; +-static PRLock *keeprunning_mutex = NULL; +-static PRCondVar *keeprunning_cv = NULL; ++static pthread_mutex_t keeprunning_mutex; ++static pthread_cond_t keeprunning_cv; ++ + static int keeprunning = 0; + static referint_config *config = NULL; + static Slapi_DN *_ConfigAreaDN = NULL; +@@ -1302,12 +1303,38 @@ referint_postop_start(Slapi_PBlock *pb) + * -1 = integrity off + */ + if (referint_get_delay() > 0) { ++ pthread_condattr_t condAttr; ++ + /* initialize the cv and lock */ + if (!use_txn && (NULL == referint_mutex)) { + referint_mutex = PR_NewLock(); + } +- keeprunning_mutex = PR_NewLock(); +- keeprunning_cv = PR_NewCondVar(keeprunning_mutex); ++ if ((rc = pthread_mutex_init(&keeprunning_mutex, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ if ((rc = pthread_cond_init(&keeprunning_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot create new condition variable. 
error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ ++ + keeprunning = 1; + + referint_tid = PR_CreateThread(PR_USER_THREAD, +@@ -1337,13 +1364,11 @@ int + referint_postop_close(Slapi_PBlock *pb __attribute__((unused))) + { + /* signal the thread to exit */ +- if (NULL != keeprunning_mutex) { +- PR_Lock(keeprunning_mutex); ++ if (referint_get_delay() > 0) { ++ pthread_mutex_lock(&keeprunning_mutex); + keeprunning = 0; +- if (NULL != keeprunning_cv) { +- PR_NotifyCondVar(keeprunning_cv); +- } +- PR_Unlock(keeprunning_mutex); ++ pthread_cond_signal(&keeprunning_cv); ++ pthread_mutex_unlock(&keeprunning_mutex); + } + + slapi_destroy_rwlock(config_rwlock); +@@ -1369,6 +1394,7 @@ referint_thread_func(void *arg __attribute__((unused))) + char *iter = NULL; + Slapi_DN *sdn = NULL; + Slapi_DN *tmpsuperior = NULL; ++ struct timespec current_time = {0}; + int delay; + int no_changes; + +@@ -1383,20 +1409,22 @@ referint_thread_func(void *arg __attribute__((unused))) + no_changes = 1; + while (no_changes) { + +- PR_Lock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); + if (keeprunning == 0) { +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + break; + } +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + + referint_lock(); + if ((prfd = PR_Open(logfilename, PR_RDONLY, REFERINT_DEFAULT_FILE_MODE)) == NULL) { + referint_unlock(); + /* go back to sleep and wait for this file */ +- PR_Lock(keeprunning_mutex); +- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += delay; ++ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, ¤t_time); ++ pthread_mutex_unlock(&keeprunning_mutex); + } else { + no_changes = 0; + } +@@ -1407,12 +1435,12 @@ referint_thread_func(void *arg __attribute__((unused))) + * loop before trying to do the changes. 
The server + * will pick them up on next startup as file still exists + */ +- PR_Lock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); + if (keeprunning == 0) { +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + break; + } +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + + while (GetNextLine(thisline, MAX_LINE, prfd)) { + ptoken = ldap_utf8strtok_r(thisline, delimiter, &iter); +@@ -1459,21 +1487,16 @@ referint_thread_func(void *arg __attribute__((unused))) + referint_unlock(); + + /* wait on condition here */ +- PR_Lock(keeprunning_mutex); +- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += delay; ++ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, ¤t_time); ++ pthread_mutex_unlock(&keeprunning_mutex); + } + + /* cleanup resources allocated in start */ +- if (NULL != keeprunning_mutex) { +- PR_DestroyLock(keeprunning_mutex); +- } +- if (NULL != referint_mutex) { +- PR_DestroyLock(referint_mutex); +- } +- if (NULL != keeprunning_cv) { +- PR_DestroyCondVar(keeprunning_cv); +- } ++ pthread_mutex_destroy(&keeprunning_mutex); ++ pthread_cond_destroy(&keeprunning_cv); + slapi_ch_free_string(&logfilename); + } + +diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h +index f1c596a3f..06e747811 100644 +--- a/ldap/servers/plugins/replication/repl5.h ++++ b/ldap/servers/plugins/replication/repl5.h +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2010 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. + * All rights reserved. + * +@@ -28,6 +28,7 @@ + #include "llist.h" + #include "repl5_ruv.h" + #include "plstr.h" ++#include + + #define START_UPDATE_DELAY 2 /* 2 second */ + #define REPLICA_TYPE_WINDOWS 1 +diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c +index 40848b96d..40ec75dd7 100644 +--- a/ldap/servers/plugins/replication/repl5_backoff.c ++++ b/ldap/servers/plugins/replication/repl5_backoff.c +@@ -110,7 +110,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) + bt->next_interval = bt->initial_interval; + } + /* Schedule the callback */ +- bt->last_fire_time = slapi_current_utc_time(); ++ bt->last_fire_time = slapi_current_rel_time_t(); + return_value = bt->last_fire_time + bt->next_interval; + bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, + return_value); +@@ -177,7 +177,7 @@ backoff_expired(Backoff_Timer *bt, int margin) + + PR_ASSERT(NULL != bt); + PR_Lock(bt->lock); +- return_value = (slapi_current_utc_time() >= (bt->last_fire_time + bt->next_interval + margin)); ++ return_value = (slapi_current_rel_time_t() >= (bt->last_fire_time + bt->next_interval + margin)); + PR_Unlock(bt->lock); + return return_value; + } +diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c +index cf57c2156..bc9ca424b 100644 +--- a/ldap/servers/plugins/replication/repl5_connection.c ++++ b/ldap/servers/plugins/replication/repl5_connection.c +@@ -402,7 +402,7 @@ conn_read_result_ex(Repl_Connection *conn, char **retoidp, struct berval **retda + } + if (block) { + /* Did the connection's timeout expire ? 
*/ +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + if (conn->timeout.tv_sec <= (time_now - start_time)) { + /* We timed out */ + rc = 0; +@@ -676,7 +676,7 @@ conn_is_available(Repl_Connection *conn) + { + time_t poll_timeout_sec = 1; /* Polling for 1sec */ + time_t yield_delay_msec = 100; /* Delay to wait */ +- time_t start_time = slapi_current_utc_time(); ++ time_t start_time = slapi_current_rel_time_t(); + time_t time_now; + ConnResult return_value = CONN_OPERATION_SUCCESS; + +@@ -686,7 +686,7 @@ conn_is_available(Repl_Connection *conn) + /* in case of timeout we return CONN_TIMEOUT only + * if the RA.timeout is exceeded + */ +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + if (conn->timeout.tv_sec <= (time_now - start_time)) { + break; + } else { +@@ -1010,7 +1010,7 @@ linger_timeout(time_t event_time __attribute__((unused)), void *arg) + void + conn_start_linger(Repl_Connection *conn) + { +- time_t now; ++ time_t now = slapi_current_rel_time_t(); + + PR_ASSERT(NULL != conn); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, +@@ -1022,7 +1022,7 @@ conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + return; + } +- now = slapi_current_utc_time(); ++ + PR_Lock(conn->lock); + if (conn->linger_active) { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, +@@ -1989,7 +1989,7 @@ repl5_start_debug_timeout(int *setlevel) + { + Slapi_Eq_Context eqctx = 0; + if (s_debug_timeout && s_debug_level) { +- time_t now = slapi_current_utc_time(); ++ time_t now = slapi_current_rel_time_t(); + eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, + s_debug_timeout + now); + } +diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c +index af5e5897c..4bb384882 100644 +--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c ++++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -129,7 +129,7 @@ typedef struct result_data + * don't see any updates for a period equal to this interval, + * we go ahead and start a replication session, just to be safe + */ +-#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ ++#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ + + /* + * tests if the protocol has been shutdown and we need to quit +@@ -145,7 +145,7 @@ typedef struct result_data + /* Forward declarations */ + static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); + static void reset_events(Private_Repl_Protocol *prp); +-static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); ++static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); + static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent); + static void repl5_inc_backoff_expired(time_t timer_fire_time, void *arg); + static int examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); +@@ -253,7 +253,7 @@ repl5_inc_result_threadmain(void *param) + char *uniqueid = NULL; + char *ldap_error_string = NULL; + time_t time_now = 0; +- time_t start_time = slapi_current_utc_time(); ++ time_t start_time = slapi_current_rel_time_t(); + int connection_error = 0; + int operation_code = 0; + int backoff_time = 1; +@@ -275,7 +275,7 @@ repl5_inc_result_threadmain(void *param) + /* We need to a) check that the 'real' timeout hasn't expired and + * b) implement a backoff sleep to avoid spinning */ + /* Did the connection's timeout expire ? */ +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + if (conn_get_timeout(conn) <= (time_now - start_time)) { + /* We timed out */ + conres = CONN_TIMEOUT; +@@ -358,7 +358,7 @@ repl5_inc_result_threadmain(void *param) + /* Should we stop ? */ + PR_Lock(rd->lock); + if (!finished && yield_session && rd->abort != SESSION_ABORTED && rd->abort_time == 0) { +- rd->abort_time = slapi_current_utc_time(); ++ rd->abort_time = slapi_current_rel_time_t(); + rd->abort = SESSION_ABORTED; /* only set the abort time once */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "repl5_inc_result_threadmain - " + "Abort control detected, setting abort time...(%s)\n", +@@ -532,13 +532,11 @@ repl5_inc_delete(Private_Repl_Protocol **prpp) + (*prpp)->stop(*prpp); + } + /* Then, delete all resources used by the protocol */ +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +@@ -712,7 +710,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + conn_set_agmt_changed(prp->conn); + } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) { /* change available */ + /* just ignore it and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || + event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { + /* this events - should not occur - log a warning and go to sleep */ +@@ -720,13 +718,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) + "repl5_inc_run - %s: " + "Event %s should not occur in state %s; going to sleep\n", + agmt_get_long_name(prp->agmt), e1 ? 
event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* wait until window opens or an event occurs */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "repl5_inc_run - %s: Waiting for update window to open\n", + agmt_get_long_name(prp->agmt)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + +@@ -850,7 +848,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + } + next_state = STATE_BACKOFF; + backoff_reset(prp_priv->backoff, repl5_inc_backoff_expired, (void *)prp); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + use_busy_backoff_timer = PR_FALSE; + } + break; +@@ -899,13 +897,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) + */ + if (STATE_BACKOFF == next_state) { + /* Step the backoff timer */ +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + next_fire_time = backoff_step(prp_priv->backoff); + /* And go back to sleep */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "repl5_inc_run - %s: Replication session backing off for %ld seconds\n", + agmt_get_long_name(prp->agmt), next_fire_time - now); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* Destroy the backoff timer, since we won't need it anymore */ + backoff_delete(&prp_priv->backoff); +@@ -923,7 +921,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + next_state = STATE_READY_TO_ACQUIRE; + } else { + /* ignore changes and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { + /* this should never happen - log an error and go to sleep */ +@@ -931,7 +929,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + "Event %s should not occur in state %s; going to sleep\n", + agmt_get_long_name(prp->agmt), event2name(EVENT_WINDOW_OPENED), + state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + +@@ -1178,7 +1176,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + reset_events(prp); + } + +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + break; + + case STATE_STOP_NORMAL_TERMINATION: +@@ -1209,20 +1207,28 @@ repl5_inc_run(Private_Repl_Protocol *prp) + * Go to sleep until awakened. + */ + static void +-protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) ++protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) + { + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + /* we should not go to sleep if there are events available to be processed. 
+ Otherwise, we can miss the event that suppose to wake us up */ +- if (prp->eventbits == 0) +- PR_WaitCondVar(prp->cvar, duration); +- else { ++ if (prp->eventbits == 0) { ++ if (duration > 0) { ++ struct timespec current_time = {0}; ++ /* get the current monotonic time and add our interval */ ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += duration; ++ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), ¤t_time); ++ } else { ++ pthread_cond_wait(&(prp->cvar), &(prp->lock)); ++ } ++ } else { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "protocol_sleep - %s: Can't go to sleep: event bits - %x\n", + agmt_get_long_name(prp->agmt), prp->eventbits); + } +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + } + + /* +@@ -1235,10 +1241,10 @@ static void + event_notify(Private_Repl_Protocol *prp, PRUint32 event) + { + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + prp->eventbits |= event; +- PR_NotifyCondVar(prp->cvar); +- PR_Unlock(prp->lock); ++ pthread_cond_signal(&(prp->cvar)); ++ pthread_mutex_unlock(&(prp->lock)); + } + + /* +@@ -1250,10 +1256,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event) + { + PRUint32 return_value; + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + return_value = (prp->eventbits & event); + prp->eventbits &= ~event; /* Clear event */ +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + return return_value; + } + +@@ -1261,9 +1267,9 @@ static void + reset_events(Private_Repl_Protocol *prp) + { + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + prp->eventbits = 0; +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + } + + /* +@@ -1882,7 +1888,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu + /* See if the result thread has hit a problem */ + + if (!finished && rd->abort_time) { +- time_t current_time = slapi_current_utc_time(); ++ time_t current_time = slapi_current_rel_time_t(); + if ((current_time - rd->abort_time) >= release_timeout) { + rd->result = UPDATE_YIELD; + return_value = UPDATE_YIELD; +@@ -2088,7 +2094,9 @@ Private_Repl_Protocol * + Repl_5_Inc_Protocol_new(Repl_Protocol *rp) + { + repl5_inc_private *rip = NULL; +- Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); ++ pthread_condattr_t cattr; /* the pthread condition attr */ ++ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); ++ + prp->delete = repl5_inc_delete; + prp->run = repl5_inc_run; + prp->stop = repl5_inc_stop; +@@ -2099,12 +2107,19 @@ Repl_5_Inc_Protocol_new(Repl_Protocol *rp) + prp->notify_window_closed = repl5_inc_notify_window_closed; + prp->update_now = repl5_inc_update_now; + prp->replica = prot_get_replica(rp); +- if ((prp->lock = PR_NewLock()) == NULL) { ++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_init(&cattr) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { + goto loser; + } +- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { ++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { + goto loser; + } ++ pthread_condattr_destroy(&cattr); + prp->stopped = 0; + prp->terminate = 0; + prp->eventbits = 0; +diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +index 08a58613b..82e230958 100644 +--- 
a/ldap/servers/plugins/replication/repl5_mtnode_ext.c ++++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +@@ -82,7 +82,8 @@ multimaster_mtnode_construct_replicas() + } + } + /* Wait a few seconds for everything to startup before resuming any replication tasks */ +- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), time(NULL) + 5); ++ slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), ++ slapi_current_rel_time_t() + 5); + } + } + } +diff --git a/ldap/servers/plugins/replication/repl5_prot_private.h b/ldap/servers/plugins/replication/repl5_prot_private.h +index 5b2e1b3ca..0673f1978 100644 +--- a/ldap/servers/plugins/replication/repl5_prot_private.h ++++ b/ldap/servers/plugins/replication/repl5_prot_private.h +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -32,8 +32,6 @@ typedef struct private_repl_protocol + void (*notify_window_opened)(struct private_repl_protocol *); + void (*notify_window_closed)(struct private_repl_protocol *); + void (*update_now)(struct private_repl_protocol *); +- PRLock *lock; +- PRCondVar *cvar; + int stopped; + int terminate; + PRUint32 eventbits; +@@ -46,6 +44,8 @@ typedef struct private_repl_protocol + int repl50consumer; /* Flag to tell us if this is a 5.0-style consumer we're talking to */ + int repl71consumer; /* Flag to tell us if this is a 7.1-style consumer we're talking to */ + int repl90consumer; /* Flag to tell us if this is a 9.0-style consumer we're talking to */ ++ pthread_mutex_t lock; ++ pthread_cond_t cvar; + } Private_Repl_Protocol; + + extern Private_Repl_Protocol *Repl_5_Inc_Protocol_new(Repl_Protocol *rp); +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index 7e56d6557..c1d376c72 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -232,7 +232,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, + In that case the updated would fail but nothing bad would happen. The next + scheduled update would save the state */ + r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + + if (r->tombstone_reap_interval > 0) { + /* +@@ -240,7 +240,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, + * This will allow the server to fully start before consuming resources. 
+ */ + r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, +- slapi_current_utc_time() + r->tombstone_reap_interval, ++ slapi_current_rel_time_t() + r->tombstone_reap_interval, + 1000 * r->tombstone_reap_interval); + } + +@@ -1088,7 +1088,7 @@ replica_is_updatedn(Replica *r, const Slapi_DN *sdn) + if (r->groupdn_list) { + /* check and rebuild groupdns */ + if (r->updatedn_group_check_interval > -1) { +- time_t now = slapi_current_utc_time(); ++ time_t now = slapi_current_rel_time_t(); + if (now - r->updatedn_group_last_check > r->updatedn_group_check_interval) { + Slapi_ValueSet *updatedn_groups_copy = NULL; + ReplicaUpdateDNList groupdn_list = replica_updatedn_list_new(NULL); +@@ -1512,7 +1512,7 @@ replica_set_enabled(Replica *r, PRBool enable) + if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ + { + r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + } + } else /* disable */ + { +@@ -3637,7 +3637,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) + r->tombstone_reap_interval = interval; + if (interval > 0 && r->repl_eqcxt_tr == NULL) { + r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, +- slapi_current_utc_time() + r->tombstone_reap_interval, ++ slapi_current_rel_time_t() + r->tombstone_reap_interval, + 1000 * r->tombstone_reap_interval); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", +diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c +index d64d4bf45..a969ef82f 100644 +--- a/ldap/servers/plugins/replication/repl5_replica_config.c ++++ b/ldap/servers/plugins/replication/repl5_replica_config.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -31,14 +31,17 @@ + #define CLEANALLRUVLEN 11 + #define REPLICA_RDN "cn=replica" + ++#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */ ++#define CLEANALLRUV_SLEEP 5 ++ + int slapi_log_urp = SLAPI_LOG_REPL; + static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0}; + static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0}; + static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0}; + static PRLock *rid_lock = NULL; + static PRLock *abort_rid_lock = NULL; +-static PRLock *notify_lock = NULL; +-static PRCondVar *notify_cvar = NULL; ++static pthread_mutex_t notify_lock; ++static pthread_cond_t notify_cvar; + static PRLock *task_count_lock = NULL; + static int32_t clean_task_count = 0; + static int32_t abort_task_count = 0; +@@ -105,6 +108,9 @@ dont_allow_that(Slapi_PBlock *pb __attribute__((unused)), + int + replica_config_init() + { ++ int rc = 0; ++ pthread_condattr_t condAttr; ++ + s_configLock = PR_NewLock(); + + if (s_configLock == NULL) { +@@ -134,18 +140,31 @@ replica_config_init() + PR_GetError()); + return -1; + } +- if ((notify_lock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " +- "Failed to create notify lock; NSPR error - %d\n", +- PR_GetError()); ++ if ((rc = pthread_mutex_init(¬ify_lock, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Failed to create notify lock: error %d (%s)\n", ++ rc, strerror(rc)); + return -1; + } +- if ((notify_cvar = PR_NewCondVar(notify_lock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " +- "Failed to create notify cond var; NSPR error - %d\n", +- PR_GetError()); ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Failed to create notify new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); + return -1; + } ++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ return -1; ++ } ++ if ((rc = pthread_cond_init(¬ify_cvar, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Failed to create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ return -1; ++ } ++ pthread_condattr_destroy(&condAttr); + + /* config DSE must be initialized before we get here */ + slapi_config_register_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, CONFIG_BASE, LDAP_SCOPE_SUBTREE, +@@ -1674,9 +1693,13 @@ replica_cleanallruv_thread(void *arg) + * to startup timing issues, we need to wait before grabbing the replica obj, as + * the backends might not be online yet. 
+ */ +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(10)); +- PR_Unlock(notify_lock); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += 10; ++ ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + data->replica = replica_get_replica_from_dn(data->sdn); + if (data->replica == NULL) { + cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Unable to retrieve repl object from dn(%s).", data->sdn); +@@ -1720,15 +1743,18 @@ replica_cleanallruv_thread(void *arg) + ruv_obj = replica_get_ruv(data->replica); + ruv = object_get_data(ruv_obj); + while (data->maxcsn && !is_task_aborted(data->rid) && !is_cleaned_rid(data->rid) && !slapi_is_shutting_down()) { ++ struct timespec current_time = {0}; + if (csn_get_replicaid(data->maxcsn) == 0 || + ruv_covers_csn_cleanallruv(ruv, data->maxcsn) || + strcasecmp(data->force, "yes") == 0) { + /* We are caught up, now we can clean the ruv's */ + break; + } +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(5)); +- PR_Unlock(notify_lock); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += CLEANALLRUV_SLEEP; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } + object_release(ruv_obj); + /* +@@ -1796,18 +1822,20 @@ replica_cleanallruv_thread(void *arg) + /* + * need to sleep between passes + */ +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Not all replicas have received the " +- "cleanallruv extended op, retrying in %d seconds", ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Not all replicas have received the cleanallruv extended op, retrying in %d seconds", + interval); + if (!slapi_is_shutting_down()) { +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); +- PR_Unlock(notify_lock); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } +- if (interval < 14400) { /* 4 hour max */ +- interval = interval * 2; +- } else { +- interval = 14400; ++ interval *= 2; ++ if (interval >= CLEANALLRUV_MAX_WAIT) { ++ interval = CLEANALLRUV_MAX_WAIT; + } + } + /* +@@ -1857,18 +1885,19 @@ replica_cleanallruv_thread(void *arg) + * Need to sleep between passes unless we are shutting down + */ + if (!slapi_is_shutting_down()) { +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Replicas have not been cleaned yet, " +- "retrying in %d seconds", ++ struct timespec current_time = {0}; ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Replicas have not been cleaned yet, retrying in %d seconds", + interval); +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); +- PR_Unlock(notify_lock); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } +- +- if (interval < 14400) { /* 4 hour max */ +- interval = interval * 2; +- } else { +- interval = 14400; ++ interval *= 2; ++ if (interval >= CLEANALLRUV_MAX_WAIT) { ++ interval = CLEANALLRUV_MAX_WAIT; + } + } /* while */ + +@@ 
-2081,15 +2110,17 @@ check_replicas_are_done_cleaning(cleanruv_data *data) + "Not all replicas finished cleaning, retrying in %d seconds", + interval); + if (!slapi_is_shutting_down()) { +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); +- PR_Unlock(notify_lock); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_lock(¬ify_lock); + } + +- if (interval < 14400) { /* 4 hour max */ +- interval = interval * 2; +- } else { +- interval = 14400; ++ interval *= 2; ++ if (interval >= CLEANALLRUV_MAX_WAIT) { ++ interval = CLEANALLRUV_MAX_WAIT; + } + } + slapi_ch_free_string(&filter); +@@ -2190,14 +2221,16 @@ check_replicas_are_done_aborting(cleanruv_data *data) + cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, + "Not all replicas finished aborting, retrying in %d seconds", interval); + if (!slapi_is_shutting_down()) { +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); +- PR_Unlock(notify_lock); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } +- if (interval < 14400) { /* 4 hour max */ +- interval = interval * 2; +- } else { +- interval = 14400; ++ interval *= 2; ++ if (interval >= CLEANALLRUV_MAX_WAIT) { ++ interval = CLEANALLRUV_MAX_WAIT; + } + } + slapi_ch_free_string(&filter); +@@ -2248,14 +2281,16 @@ check_agmts_are_caught_up(cleanruv_data *data, char *maxcsn) + cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, + "Not all replicas caught up, retrying in %d seconds", interval); + if (!slapi_is_shutting_down()) { +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); +- PR_Unlock(notify_lock); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } +- if (interval < 14400) { /* 4 hour max */ +- interval = interval * 2; +- } else { +- interval = 14400; ++ interval *= 2; ++ if (interval >= CLEANALLRUV_MAX_WAIT) { ++ interval = CLEANALLRUV_MAX_WAIT; + } + } + slapi_ch_free_string(&rid_text); +@@ -2310,14 +2345,16 @@ check_agmts_are_alive(Replica *replica, ReplicaId rid, Slapi_Task *task) + interval); + + if (!slapi_is_shutting_down()) { +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); +- PR_Unlock(notify_lock); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } +- if (interval < 14400) { /* 4 hour max */ +- interval = interval * 2; +- } else { +- interval = 14400; ++ interval *= 2; ++ if (interval >= CLEANALLRUV_MAX_WAIT) { ++ interval = CLEANALLRUV_MAX_WAIT; + } + } + if (is_task_aborted(rid)) { +@@ -3093,16 +3130,18 @@ replica_abort_task_thread(void *arg) + * Need to sleep between passes. 
unless we are shutting down + */ + if (!slapi_is_shutting_down()) { ++ struct timespec current_time = {0}; + cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Retrying in %d seconds", interval); +- PR_Lock(notify_lock); +- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); +- PR_Unlock(notify_lock); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_timedwait(¬ify_cvar, ¬ify_lock, ¤t_time); ++ pthread_mutex_unlock(¬ify_lock); + } + +- if (interval < 14400) { /* 4 hour max */ +- interval = interval * 2; +- } else { +- interval = 14400; ++ interval *= 2; ++ if (interval >= CLEANALLRUV_MAX_WAIT) { ++ interval = CLEANALLRUV_MAX_WAIT; + } + } /* while */ + +@@ -3536,10 +3575,10 @@ check_and_set_abort_cleanruv_task_count(void) + + PR_Lock(task_count_lock); + if (abort_task_count > CLEANRIDSIZ) { +- rc = -1; +- } else { +- abort_task_count++; +- } ++ rc = -1; ++ } else { ++ abort_task_count++; ++ } + PR_Unlock(task_count_lock); + + return rc; +@@ -3551,11 +3590,9 @@ check_and_set_abort_cleanruv_task_count(void) + void + stop_ruv_cleaning() + { +- if (notify_lock) { +- PR_Lock(notify_lock); +- PR_NotifyCondVar(notify_cvar); +- PR_Unlock(notify_lock); +- } ++ pthread_mutex_lock(¬ify_lock); ++ pthread_cond_signal(¬ify_cvar); ++ pthread_mutex_unlock(¬ify_lock); + } + + /* +diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c +index a25839f21..f67263c3e 100644 +--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c ++++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -45,7 +45,7 @@ typedef struct callback_data + unsigned long num_entries; + time_t sleep_on_busy; + time_t last_busy; +- PRLock *lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */ ++ pthread_mutex_t lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */ + PRThread *result_tid; /* The async result thread */ + operation_id_list_item *message_id_list; /* List of IDs for outstanding operations */ + int abort; /* Flag used to tell the sending thread asyncronously that it should abort (because an error came up in a result) */ +@@ -113,7 +113,7 @@ repl5_tot_result_threadmain(void *param) + while (!finished) { + int message_id = 0; + time_t time_now = 0; +- time_t start_time = slapi_current_utc_time(); ++ time_t start_time = slapi_current_rel_time_t(); + int backoff_time = 1; + + /* Read the next result */ +@@ -130,7 +130,7 @@ repl5_tot_result_threadmain(void *param) + /* We need to a) check that the 'real' timeout hasn't expired and + * b) implement a backoff sleep to avoid spinning */ + /* Did the connection's timeout expire ? */ +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + if (conn_get_timeout(conn) <= (time_now - start_time)) { + /* We timed out */ + conres = CONN_TIMEOUT; +@@ -142,11 +142,11 @@ repl5_tot_result_threadmain(void *param) + backoff_time <<= 1; + } + /* Should we stop ? 
*/ +- PR_Lock(cb->lock); ++ pthread_mutex_lock(&(cb->lock)); + if (cb->stop_result_thread) { + finished = 1; + } +- PR_Unlock(cb->lock); ++ pthread_mutex_unlock(&(cb->lock)); + } else { + /* Something other than a timeout, so we exit the loop */ + break; +@@ -164,21 +164,21 @@ repl5_tot_result_threadmain(void *param) + /* Was the result itself an error ? */ + if (0 != conres) { + /* If so then we need to take steps to abort the update process */ +- PR_Lock(cb->lock); ++ pthread_mutex_lock(&(cb->lock)); + cb->abort = 1; + if (conres == CONN_NOT_CONNECTED) { + cb->rc = LDAP_CONNECT_ERROR; + } +- PR_Unlock(cb->lock); ++ pthread_mutex_unlock(&(cb->lock)); + } + /* Should we stop ? */ +- PR_Lock(cb->lock); ++ pthread_mutex_lock(&(cb->lock)); + /* if the connection is not connected, then we cannot read any more + results - we are finished */ + if (cb->stop_result_thread || (conres == CONN_NOT_CONNECTED)) { + finished = 1; + } +- PR_Unlock(cb->lock); ++ pthread_mutex_unlock(&(cb->lock)); + } + } + +@@ -209,9 +209,9 @@ repl5_tot_destroy_async_result_thread(callback_data *cb_data) + int retval = 0; + PRThread *tid = cb_data->result_tid; + if (tid) { +- PR_Lock(cb_data->lock); ++ pthread_mutex_lock(&(cb_data->lock)); + cb_data->stop_result_thread = 1; +- PR_Unlock(cb_data->lock); ++ pthread_mutex_unlock(&(cb_data->lock)); + (void)PR_JoinThread(tid); + } + return retval; +@@ -248,7 +248,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) + /* Keep pulling results off the LDAP connection until we catch up to the last message id stored in the rd */ + while (!done) { + /* Lock the structure to force memory barrier */ +- PR_Lock(cb_data->lock); ++ pthread_mutex_lock(&(cb_data->lock)); + /* Are we caught up ? */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "repl5_tot_waitfor_async_results - %d %d\n", +@@ -260,7 +260,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) + if (cb_data->abort && LOST_CONN_ERR(cb_data->rc)) { + done = 1; /* no connection == no more results */ + } +- PR_Unlock(cb_data->lock); ++ pthread_mutex_unlock(&(cb_data->lock)); + /* If not then sleep a bit */ + DS_Sleep(PR_SecondsToInterval(1)); + loops++; +@@ -482,9 +482,9 @@ retry: + cb_data.rc = 0; + cb_data.num_entries = 1UL; + cb_data.sleep_on_busy = 0UL; +- cb_data.last_busy = slapi_current_utc_time(); ++ cb_data.last_busy = slapi_current_rel_time_t(); + cb_data.flowcontrol_detection = 0; +- cb_data.lock = PR_NewLock(); ++ pthread_mutex_init(&(cb_data.lock), NULL); + + /* This allows during perform_operation to check the callback data + * especially to do flow contol on delta send msgid / recv msgid +@@ -541,9 +541,9 @@ retry: + cb_data.rc = 0; + cb_data.num_entries = 0UL; + cb_data.sleep_on_busy = 0UL; +- cb_data.last_busy = slapi_current_utc_time(); ++ cb_data.last_busy = slapi_current_rel_time_t(); + cb_data.flowcontrol_detection = 0; +- cb_data.lock = PR_NewLock(); ++ pthread_mutex_init(&(cb_data.lock), NULL); + + /* This allows during perform_operation to check the callback data + * especially to do flow contol on delta send msgid / recv msgid +@@ -633,9 +633,7 @@ done: + type_nsds5ReplicaFlowControlWindow); + } + conn_set_tot_update_cb(prp->conn, NULL); +- if (cb_data.lock) { +- PR_DestroyLock(cb_data.lock); +- } ++ pthread_mutex_destroy(&(cb_data.lock)); + prp->stopped = 1; + } + +@@ -700,7 +698,9 @@ Private_Repl_Protocol * + Repl_5_Tot_Protocol_new(Repl_Protocol *rp) + { + repl5_tot_private *rip = NULL; +- Private_Repl_Protocol *prp = (Private_Repl_Protocol 
*)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); ++ pthread_condattr_t cattr; ++ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); ++ + prp->delete = repl5_tot_delete; + prp->run = repl5_tot_run; + prp->stop = repl5_tot_stop; +@@ -710,12 +710,19 @@ Repl_5_Tot_Protocol_new(Repl_Protocol *rp) + prp->notify_window_opened = repl5_tot_noop; + prp->notify_window_closed = repl5_tot_noop; + prp->update_now = repl5_tot_noop; +- if ((prp->lock = PR_NewLock()) == NULL) { ++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_init(&cattr) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { + goto loser; + } +- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { ++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { + goto loser; + } ++ pthread_condattr_destroy(&cattr); + prp->stopped = 1; + prp->terminate = 0; + prp->eventbits = 0; +@@ -744,13 +751,11 @@ repl5_tot_delete(Private_Repl_Protocol **prpp) + (*prpp)->stop(*prpp); + } + /* Then, delete all resources used by the protocol */ +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +@@ -824,9 +829,9 @@ send_entry(Slapi_Entry *e, void *cb_data) + + /* see if the result reader thread encountered + a fatal error */ +- PR_Lock(((callback_data *)cb_data)->lock); ++ pthread_mutex_lock((&((callback_data *)cb_data)->lock)); + rc = ((callback_data *)cb_data)->abort; +- PR_Unlock(((callback_data *)cb_data)->lock); ++ pthread_mutex_unlock((&((callback_data *)cb_data)->lock)); + if (rc) { + conn_disconnect(prp->conn); + ((callback_data *)cb_data)->rc = -1; +@@ -889,7 +894,7 @@ send_entry(Slapi_Entry *e, void *cb_data) + } + + if (rc == CONN_BUSY) { +- time_t now = slapi_current_utc_time(); ++ time_t now = slapi_current_rel_time_t(); + if ((now - *last_busyp) < (*sleep_on_busyp + 10)) { + *sleep_on_busyp += 5; + } else { +diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c +index af486f730..ef2025dd9 100644 +--- a/ldap/servers/plugins/replication/repl_extop.c ++++ b/ldap/servers/plugins/replication/repl_extop.c +@@ -1176,7 +1176,7 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb) + /* now that the changelog is open and started, we can alos cretae the + * keep alive entry without risk that db and cl will not match + */ +- replica_subentry_check(replica_get_root(r), replica_get_rid(r)); ++ replica_subentry_check((Slapi_DN *)replica_get_root(r), replica_get_rid(r)); + } + + /* ONREPL code that dealt with new RUV, etc was moved into the code +@@ -1474,7 +1474,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb) + * Launch the cleanruv monitoring thread. 
Once all the replicas are cleaned it will release the rid + */ + +- cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n"); ++ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread..."); + data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data)); + if (data == NULL) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate " +diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c +index 011b328bf..ce0662544 100644 +--- a/ldap/servers/plugins/replication/windows_connection.c ++++ b/ldap/servers/plugins/replication/windows_connection.c +@@ -1121,7 +1121,7 @@ windows_conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + return; + } +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + PR_Lock(conn->lock); + if (conn->linger_active) { + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, +diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c +index 1c07534e3..3d548e5ed 100644 +--- a/ldap/servers/plugins/replication/windows_inc_protocol.c ++++ b/ldap/servers/plugins/replication/windows_inc_protocol.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -48,7 +48,7 @@ typedef struct windows_inc_private + char *ruv; /* RUV on remote replica (use diff type for this? - ggood */ + Backoff_Timer *backoff; + Repl_Protocol *rp; +- PRLock *lock; ++ pthread_mutex_t *lock; + PRUint32 eventbits; + } windows_inc_private; + +@@ -96,7 +96,7 @@ typedef struct windows_inc_private + * don't see any updates for a period equal to this interval, + * we go ahead and start a replication session, just to be safe + */ +-#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ ++#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ + /* + * tests if the protocol has been shutdown and we need to quit + * event_occurred resets the bits in the bit flag, so whoever tests for shutdown +@@ -108,7 +108,7 @@ typedef struct windows_inc_private + /* Forward declarations */ + static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); + static void reset_events(Private_Repl_Protocol *prp); +-static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); ++static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); + static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent, int do_send); + static void windows_inc_backoff_expired(time_t timer_fire_time, void *arg); + static int windows_examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); +@@ -143,13 +143,11 @@ windows_inc_delete(Private_Repl_Protocol **prpp) + (*prpp)->stopped = 1; + (*prpp)->stop(*prpp); + } +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +@@ -360,7 +358,7 @@ 
windows_inc_run(Private_Repl_Protocol *prp) + } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) /* change available */ + { + /* just ignore it and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || + event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { + /* this events - should not occur - log a warning and go to sleep */ +@@ -370,18 +368,18 @@ windows_inc_run(Private_Repl_Protocol *prp) + agmt_get_long_name(prp->agmt), + e1 ? event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), + state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else if (event_occurred(prp, EVENT_RUN_DIRSYNC)) /* periodic_dirsync */ + { + /* just ignore it and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* wait until window opens or an event occurs */ + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_run - %s: " + "Waiting for update window to open\n", + agmt_get_long_name(prp->agmt)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + +@@ -536,7 +534,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + } + next_state = STATE_BACKOFF; + backoff_reset(prp_priv->backoff, windows_inc_backoff_expired, (void *)prp); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + use_busy_backoff_timer = PR_FALSE; + } + break; +@@ -605,7 +603,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + agmt_get_long_name(prp->agmt), + next_fire_time - now); + +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* Destroy the backoff timer, since we won't need it anymore */ + backoff_delete(&prp_priv->backoff); +@@ -624,7 +622,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + next_state = STATE_READY_TO_ACQUIRE; + } else { + /* ignore changes and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { + /* this should never happen - log an error and go to sleep */ +@@ -632,7 +630,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + "event %s should not occur in state %s; going to sleep\n", + agmt_get_long_name(prp->agmt), + event2name(EVENT_WINDOW_OPENED), state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + case STATE_SENDING_UPDATES: +@@ -856,7 +854,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + reset_events(prp); + } + +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + break; + + case STATE_STOP_NORMAL_TERMINATION: +@@ -891,21 +889,29 @@ windows_inc_run(Private_Repl_Protocol *prp) + * Go to sleep until awakened. + */ + static void +-protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) ++protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) + { + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> protocol_sleep\n"); + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + /* we should not go to sleep if there are events available to be processed. 
+ Otherwise, we can miss the event that suppose to wake us up */ +- if (prp->eventbits == 0) +- PR_WaitCondVar(prp->cvar, duration); +- else { ++ if (prp->eventbits == 0) { ++ if (duration > 0) { ++ struct timespec current_time = {0}; ++ /* get the current monotonic time and add our interval */ ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += duration; ++ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), ¤t_time); ++ } else { ++ pthread_cond_wait(&(prp->cvar), &(prp->lock)); ++ } ++ } else { + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "protocol_sleep - %s: Can't go to sleep: event bits - %x\n", + agmt_get_long_name(prp->agmt), prp->eventbits); + } +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= protocol_sleep\n"); + } + +@@ -921,10 +927,10 @@ event_notify(Private_Repl_Protocol *prp, PRUint32 event) + { + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_notify\n"); + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + prp->eventbits |= event; +- PR_NotifyCondVar(prp->cvar); +- PR_Unlock(prp->lock); ++ pthread_cond_signal(&(prp->cvar)); ++ pthread_mutex_unlock(&(prp->lock)); + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_notify\n"); + } + +@@ -941,10 +947,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event) + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_occurred\n"); + + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + return_value = (prp->eventbits & event); + prp->eventbits &= ~event; /* Clear event */ +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_occurred\n"); + return return_value; + } +@@ -954,9 +960,9 @@ reset_events(Private_Repl_Protocol *prp) + { + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> reset_events\n"); + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + prp->eventbits = 0; +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= reset_events\n"); + } + +@@ -1416,6 +1422,7 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp) + { + windows_inc_private *rip = NULL; + Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); ++ pthread_condattr_t cattr; + + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Inc_Protocol_new\n"); + +@@ -1429,12 +1436,19 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp) + prp->notify_window_closed = windows_inc_notify_window_closed; + prp->update_now = windows_inc_update_now; + prp->replica = prot_get_replica(rp); +- if ((prp->lock = PR_NewLock()) == NULL) { ++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_init(&cattr) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { + goto loser; + } +- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { ++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { + goto loser; + } ++ pthread_condattr_destroy(&cattr); /* no longer needed */ + prp->stopped = 0; + prp->terminate = 0; + prp->eventbits = 0; +diff --git a/ldap/servers/plugins/replication/windows_tot_protocol.c b/ldap/servers/plugins/replication/windows_tot_protocol.c +index da244c166..f67e4dbd2 100644 +--- 
a/ldap/servers/plugins/replication/windows_tot_protocol.c ++++ b/ldap/servers/plugins/replication/windows_tot_protocol.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -326,6 +326,7 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) + { + windows_tot_private *rip = NULL; + Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); ++ pthread_condattr_t cattr; + + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Tot_Protocol_new\n"); + +@@ -339,12 +340,19 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) + prp->notify_window_closed = windows_tot_noop; + prp->replica = prot_get_replica(rp); + prp->update_now = windows_tot_noop; +- if ((prp->lock = PR_NewLock()) == NULL) { ++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { + goto loser; + } +- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { ++ if (pthread_condattr_init(&cattr) != 0) { + goto loser; + } ++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { ++ goto loser; ++ } ++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { ++ goto loser; ++ } ++ pthread_condattr_destroy(&cattr); + prp->stopped = 1; + prp->terminate = 0; + prp->eventbits = 0; +@@ -373,13 +381,11 @@ windows_tot_delete(Private_Repl_Protocol **prpp) + (*prpp)->stop(*prpp); + } + /* Then, delete all resources used by the protocol */ +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c +index d031dc3f8..a3e16c4e1 100644 +--- a/ldap/servers/plugins/retrocl/retrocl_trim.c ++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c +@@ -241,7 +241,7 @@ trim_changelog(void) + int me, lt; + + +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + + PR_Lock(ts.ts_s_trim_mutex); + me = ts.ts_c_max_age; +diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c +index de99ba233..3d076a4cb 100644 +--- a/ldap/servers/plugins/roles/roles_cache.c ++++ b/ldap/servers/plugins/roles/roles_cache.c +@@ -343,7 +343,7 @@ roles_cache_create_suffix(Slapi_DN *sdn) + + slapi_lock_mutex(new_suffix->create_lock); + if (new_suffix->is_ready != 1) { +- slapi_wait_condvar(new_suffix->suffix_created, NULL); ++ slapi_wait_condvar_pt(new_suffix->suffix_created, new_suffix->create_lock, NULL); + } + slapi_unlock_mutex(new_suffix->create_lock); + +@@ -384,7 +384,7 @@ roles_cache_wait_on_change(void *arg) + test roles_def->keeprunning before + going to sleep. 
+ */ +- slapi_wait_condvar(roles_def->something_changed, NULL); ++ slapi_wait_condvar_pt(roles_def->something_changed, roles_def->change_lock, NULL); + + slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "roles_cache_wait_on_change - notified\n"); + +diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h +index 51d0da6e0..7241fddbf 100644 +--- a/ldap/servers/plugins/sync/sync.h ++++ b/ldap/servers/plugins/sync/sync.h +@@ -201,8 +201,8 @@ typedef struct sync_request_list + { + Slapi_RWLock *sync_req_rwlock; /* R/W lock struct to serialize access */ + SyncRequest *sync_req_head; /* Head of list */ +- PRLock *sync_req_cvarlock; /* Lock for cvar */ +- PRCondVar *sync_req_cvar; /* ps threads sleep on this */ ++ pthread_mutex_t sync_req_cvarlock; /* Lock for cvar */ ++ pthread_cond_t sync_req_cvar; /* ps threads sleep on this */ + int sync_req_max_persist; + int sync_req_cur_persist; + } SyncRequestList; +diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c +index 598c6868d..d13f142b0 100644 +--- a/ldap/servers/plugins/sync/sync_persist.c ++++ b/ldap/servers/plugins/sync/sync_persist.c +@@ -463,19 +463,40 @@ int + sync_persist_initialize(int argc, char **argv) + { + if (!SYNC_IS_INITIALIZED()) { ++ pthread_condattr_t sync_req_condAttr; /* cond var attribute */ ++ int rc = 0; ++ + sync_request_list = (SyncRequestList *)slapi_ch_calloc(1, sizeof(SyncRequestList)); + if ((sync_request_list->sync_req_rwlock = slapi_new_rwlock()) == NULL) { + slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(1).\n"); + return (-1); + } +- if ((sync_request_list->sync_req_cvarlock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(2).\n"); ++ if (pthread_mutex_init(&(sync_request_list->sync_req_cvarlock), NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Failed to create lock: error %d (%s)\n", ++ rc, strerror(rc)); ++ return (-1); ++ } ++ if ((rc = pthread_condattr_init(&sync_req_condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Failed to create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); + return (-1); + } +- if ((sync_request_list->sync_req_cvar = PR_NewCondVar(sync_request_list->sync_req_cvarlock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize condition variable.\n"); ++ if ((rc = pthread_condattr_setclock(&sync_req_condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); + return (-1); + } ++ if ((rc = pthread_cond_init(&(sync_request_list->sync_req_cvar), &sync_req_condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Failed to create new condition variable. 
error %d (%s)\n", ++ rc, strerror(rc)); ++ return (-1); ++ } ++ pthread_condattr_destroy(&sync_req_condAttr); /* no longer needed */ ++ + sync_request_list->sync_req_head = NULL; + sync_request_list->sync_req_cur_persist = 0; + sync_request_list->sync_req_max_persist = SYNC_MAX_CONCURRENT; +@@ -617,8 +638,8 @@ sync_persist_terminate_all() + } + + slapi_destroy_rwlock(sync_request_list->sync_req_rwlock); +- PR_DestroyLock(sync_request_list->sync_req_cvarlock); +- PR_DestroyCondVar(sync_request_list->sync_req_cvar); ++ pthread_mutex_destroy(&(sync_request_list->sync_req_cvarlock)); ++ pthread_cond_destroy(&(sync_request_list->sync_req_cvar)); + + /* it frees the structures, just in case it remained connected sync_repl client */ + for (req = sync_request_list->sync_req_head; NULL != req; req = next) { +@@ -725,9 +746,9 @@ static void + sync_request_wakeup_all(void) + { + if (SYNC_IS_INITIALIZED()) { +- PR_Lock(sync_request_list->sync_req_cvarlock); +- PR_NotifyAllCondVar(sync_request_list->sync_req_cvar); +- PR_Unlock(sync_request_list->sync_req_cvarlock); ++ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); ++ pthread_cond_broadcast(&(sync_request_list->sync_req_cvar)); ++ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); + } + } + +@@ -817,7 +838,7 @@ sync_send_results(void *arg) + goto done; + } + +- PR_Lock(sync_request_list->sync_req_cvarlock); ++ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); + + while ((conn_acq_flag == 0) && !req->req_complete && !plugin_closing) { + /* Check for an abandoned operation */ +@@ -833,7 +854,12 @@ sync_send_results(void *arg) + * connection code. Wake up every second to check if thread + * should terminate. + */ +- PR_WaitCondVar(sync_request_list->sync_req_cvar, PR_SecondsToInterval(1)); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += 1; ++ pthread_cond_timedwait(&(sync_request_list->sync_req_cvar), ++ &(sync_request_list->sync_req_cvarlock), ++ ¤t_time); + } else { + /* dequeue the item */ + int attrsonly; +@@ -864,7 +890,7 @@ sync_send_results(void *arg) + * Send the result. Since send_ldap_search_entry can block for + * up to 30 minutes, we relinquish all locks before calling it. + */ +- PR_Unlock(sync_request_list->sync_req_cvarlock); ++ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); + + /* + * The entry is in the right scope and matches the filter +@@ -910,13 +936,13 @@ sync_send_results(void *arg) + ldap_controls_free(ectrls); + slapi_ch_array_free(noattrs); + } +- PR_Lock(sync_request_list->sync_req_cvarlock); ++ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock)); + + /* Deallocate our wrapper for this entry */ + sync_node_free(&qnode); + } + } +- PR_Unlock(sync_request_list->sync_req_cvarlock); ++ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock)); + + /* indicate the end of search */ + sync_release_connection(req->req_pblock, conn, op, conn_acq_flag == 0); +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +index 1e4830e99..ba783ee59 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -1429,21 +1429,22 @@ import_free_job(ImportJob *job) + * To avoid freeing fifo queue under bulk_import_queue use + * job lock to synchronize + */ +- if (job->wire_lock) +- PR_Lock(job->wire_lock); ++ if (&job->wire_lock) { ++ pthread_mutex_lock(&job->wire_lock); ++ } + + import_fifo_destroy(job); + +- if (job->wire_lock) +- PR_Unlock(job->wire_lock); ++ if (&job->wire_lock) { ++ pthread_mutex_unlock(&job->wire_lock); ++ } + } + +- if (NULL != job->uuid_namespace) ++ if (NULL != job->uuid_namespace) { + slapi_ch_free((void **)&job->uuid_namespace); +- if (job->wire_lock) +- PR_DestroyLock(job->wire_lock); +- if (job->wire_cv) +- PR_DestroyCondVar(job->wire_cv); ++ } ++ pthread_mutex_destroy(&job->wire_lock); ++ pthread_cond_destroy(&job->wire_cv); + slapi_ch_free((void **)&job->task_status); + } + +@@ -1777,7 +1778,7 @@ import_monitor_threads(ImportJob *job, int *status) + goto error_abort; + } + +- last_time = slapi_current_utc_time(); ++ last_time = slapi_current_rel_time_t(); + job->start_time = last_time; + import_clear_progress_history(job); + +@@ -1789,7 +1790,7 @@ import_monitor_threads(ImportJob *job, int *status) + + /* First calculate the time interval since last reported */ + if (0 == (count % display_interval)) { +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + time_interval = time_now - last_time; + last_time = time_now; + /* Now calculate our rate of progress overall for this chunk */ +@@ -2232,7 +2233,7 @@ bdb_import_main(void *arg) + opstr = "Reindexing"; + } + PR_ASSERT(inst != NULL); +- beginning = slapi_current_utc_time(); ++ beginning = slapi_current_rel_time_t(); + + /* Decide which indexes are needed */ + if (job->flags & FLAG_INDEX_ATTRS) { +@@ -2251,9 +2252,9 @@ bdb_import_main(void *arg) + ret = import_fifo_init(job); + if (ret) { + if (!(job->flags & FLAG_USE_FILES)) { +- PR_Lock(job->wire_lock); +- PR_NotifyCondVar(job->wire_cv); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); ++ pthread_cond_signal(&job->wire_cv); ++ pthread_mutex_unlock(&job->wire_lock); + } + goto error; + } +@@ -2315,9 +2316,9 @@ bdb_import_main(void *arg) + } else { + /* release the startup lock and let the entries start queueing up + * in for import */ +- PR_Lock(job->wire_lock); +- PR_NotifyCondVar(job->wire_cv); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); ++ pthread_cond_signal(&job->wire_cv); ++ pthread_mutex_unlock(&job->wire_lock); + } + + /* Run as many passes as we need to complete the job or die honourably in +@@ -2499,7 +2500,7 @@ error: + import_log_notice(job, SLAPI_LOG_WARNING, "bdb_import_main", "Failed to close database"); + } + } +- end = slapi_current_utc_time(); ++ end = slapi_current_rel_time_t(); + if (verbose && (0 == ret)) { + int seconds_to_import = end - beginning; + size_t entries_processed = job->lead_ID - (job->starting_ID - 1); +@@ -3393,7 +3394,7 @@ import_mega_merge(ImportJob *job) + passes, (long unsigned int)job->number_indexers); + } + +- beginning = slapi_current_utc_time(); ++ beginning = slapi_current_rel_time_t(); + /* Iterate over the files */ + for (current_worker = job->worker_list; + (ret == 0) && (current_worker != NULL); +@@ -3405,9 +3406,9 @@ import_mega_merge(ImportJob *job) + time_t file_end = 0; + int key_count = 0; + +- file_beginning = slapi_current_utc_time(); ++ file_beginning = slapi_current_rel_time_t(); + ret = import_merge_one_file(current_worker, passes, &key_count); +- file_end = slapi_current_utc_time(); ++ file_end = slapi_current_rel_time_t(); + if 
(key_count == 0) { + import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "No files to merge for \"%s\".", + current_worker->index_info->name); +@@ -3426,7 +3427,7 @@ import_mega_merge(ImportJob *job) + } + } + +- end = slapi_current_utc_time(); ++ end = slapi_current_rel_time_t(); + if (0 == ret) { + int seconds_to_merge = end - beginning; + import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merging completed in %d seconds.", +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +index 5c7d9c8f7..905a84e74 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -3151,8 +3151,9 @@ bulk_import_start(Slapi_PBlock *pb) + (1024 * 1024); + } + import_subcount_stuff_init(job->mothers); +- job->wire_lock = PR_NewLock(); +- job->wire_cv = PR_NewCondVar(job->wire_lock); ++ ++ pthread_mutex_init(&job->wire_lock, NULL); ++ pthread_cond_init(&job->wire_cv, NULL); + + /* COPIED from ldif2ldbm.c : */ + +@@ -3175,7 +3176,7 @@ bulk_import_start(Slapi_PBlock *pb) + + /* END OF COPIED SECTION */ + +- PR_Lock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); + vlv_init(job->inst); + + /* create thread for import_main, so we can return */ +@@ -3188,7 +3189,7 @@ bulk_import_start(Slapi_PBlock *pb) + slapi_log_err(SLAPI_LOG_ERR, "bulk_import_start", + "Unable to spawn import thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", + prerr, slapd_pr_strerror(prerr)); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + ret = -2; + goto fail; + } +@@ -3204,8 +3205,8 @@ bulk_import_start(Slapi_PBlock *pb) + /* (don't want to send the success code back to the LDAP client until + * we're ready for the adds to start rolling in) + */ +- PR_WaitCondVar(job->wire_cv, PR_INTERVAL_NO_TIMEOUT); +- PR_Unlock(job->wire_lock); ++ pthread_cond_wait(&job->wire_cv, &job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + + return 0; + +@@ -3243,13 +3244,13 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + return -1; + } + +- PR_Lock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); + /* Let's do this inside the lock !*/ + id = job->lead_ID + 1; + /* generate uniqueid if necessary */ + if (import_generate_uniqueid(job, entry) != UID_SUCCESS) { + import_abort_all(job, 1); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + +@@ -3258,7 +3259,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + if ((ep == NULL) || (ep->ep_entry == NULL)) { + import_abort_all(job, 1); + backentry_free(&ep); /* release the backend wrapper, here */ +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + +@@ -3304,7 +3305,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + if (job->flags & FLAG_ABORT) { + backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ + backentry_free(&ep); /* release the backend wrapper, here */ +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -2; + } + +@@ -3342,7 +3343,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + /* entry is released in the frontend on failure*/ + backentry_clear_entry(ep); + backentry_free(&ep); /* release the backend wrapper */ +- 
PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + sepp = PL_strchr(sepp + 1, ','); +@@ -3368,7 +3369,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + (long unsigned int)newesize, (long unsigned int)job->fifo.bsize); + backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ + backentry_free(&ep); /* release the backend wrapper, here */ +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + /* Now check if fifo has enough space for the new entry */ +@@ -3394,7 +3395,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + job->trailing_ID = id - job->fifo.size; + } + +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return 0; + } + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c +index 0ac3694b6..5d6010f46 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -270,10 +270,8 @@ bdb_instance_cleanup(struct ldbm_instance *inst) + slapi_ch_free_string(&inst_dirp); + } + slapi_destroy_rwlock(inst_env->bdb_env_lock); +- PR_DestroyCondVar(inst_env->bdb_thread_count_cv); +- inst_env->bdb_thread_count_cv = NULL; +- PR_DestroyLock(inst_env->bdb_thread_count_lock); +- inst_env->bdb_thread_count_lock = NULL; ++ pthread_mutex_destroy(&(inst_env->bdb_thread_count_lock)); ++ pthread_cond_destroy(&(inst_env->bdb_thread_count_cv)); + slapi_ch_free((void **)&inst->inst_db); + /* + slapi_destroy_rwlock(((bdb_db_env *)inst->inst_db)->bdb_env_lock); +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 464f89f4d..6cccad8e6 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -52,16 +52,16 @@ + return. + */ + #define INCR_THREAD_COUNT(pEnv) \ +- PR_Lock(pEnv->bdb_thread_count_lock); \ ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ + ++pEnv->bdb_thread_count; \ +- PR_Unlock(pEnv->bdb_thread_count_lock) ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) + + #define DECR_THREAD_COUNT(pEnv) \ +- PR_Lock(pEnv->bdb_thread_count_lock); \ ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ + if (--pEnv->bdb_thread_count == 0) { \ +- PR_NotifyCondVar(pEnv->bdb_thread_count_cv); \ ++ pthread_cond_broadcast(&pEnv->bdb_thread_count_cv); \ + } \ +- PR_Unlock(pEnv->bdb_thread_count_lock) ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) + + #define NEWDIR_MODE 0755 + #define DB_REGION_PREFIX "__db." 
+@@ -91,9 +91,12 @@ static int trans_batch_txn_max_sleep = 50; + static PRBool log_flush_thread = PR_FALSE; + static int txn_in_progress_count = 0; + static int *txn_log_flush_pending = NULL; +-static PRLock *sync_txn_log_flush = NULL; +-static PRCondVar *sync_txn_log_flush_done = NULL; +-static PRCondVar *sync_txn_log_do_flush = NULL; ++ ++static pthread_mutex_t sync_txn_log_flush; ++static pthread_cond_t sync_txn_log_flush_done; ++static pthread_cond_t sync_txn_log_do_flush; ++ ++ + static int bdb_db_remove_ex(bdb_db_env *env, char const path[], char const dbName[], PRBool use_lock); + static int bdb_restore_file_check(struct ldbminfo *li); + +@@ -181,12 +184,12 @@ bdb_set_batch_transactions(void *arg __attribute__((unused)), void *value, char + } else { + if (val == 0) { + if (log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + } + trans_batch_limit = FLUSH_REMOTEOFF; + if (log_flush_thread) { + log_flush_thread = PR_FALSE; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } + } else if (val > 0) { + if (trans_batch_limit == FLUSH_REMOTEOFF) { +@@ -217,12 +220,12 @@ bdb_set_batch_txn_min_sleep(void *arg __attribute__((unused)), void *value, char + } else { + if (val == 0) { + if (log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + } + trans_batch_txn_min_sleep = FLUSH_REMOTEOFF; + if (log_flush_thread) { + log_flush_thread = PR_FALSE; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } + } else if (val > 0) { + if (trans_batch_txn_min_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { +@@ -249,12 +252,12 @@ bdb_set_batch_txn_max_sleep(void *arg __attribute__((unused)), void *value, char + } else { + if (val == 0) { + if (log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + } + trans_batch_txn_max_sleep = FLUSH_REMOTEOFF; + if (log_flush_thread) { + log_flush_thread = PR_FALSE; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } + } else if (val > 0) { + if (trans_batch_txn_max_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { +@@ -725,10 +728,9 @@ bdb_free_env(void **arg) + slapi_destroy_rwlock((*env)->bdb_env_lock); + (*env)->bdb_env_lock = NULL; + } +- PR_DestroyCondVar((*env)->bdb_thread_count_cv); +- (*env)->bdb_thread_count_cv = NULL; +- PR_DestroyLock((*env)->bdb_thread_count_lock); +- (*env)->bdb_thread_count_lock = NULL; ++ pthread_mutex_destroy(&((*env)->bdb_thread_count_lock)); ++ pthread_cond_destroy(&((*env)->bdb_thread_count_cv)); ++ + slapi_ch_free((void **)env); + return; + } +@@ -746,11 +748,15 @@ bdb_make_env(bdb_db_env **env, struct ldbminfo *li) + int ret; + Object *inst_obj; + ldbm_instance *inst = NULL; ++ pthread_condattr_t condAttr; + + pEnv = (bdb_db_env *)slapi_ch_calloc(1, sizeof(bdb_db_env)); + +- pEnv->bdb_thread_count_lock = PR_NewLock(); +- pEnv->bdb_thread_count_cv = PR_NewCondVar(pEnv->bdb_thread_count_lock); ++ pthread_mutex_init(&pEnv->bdb_thread_count_lock, NULL); ++ pthread_condattr_init(&condAttr); ++ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); ++ pthread_cond_init(&pEnv->bdb_thread_count_cv, &condAttr); ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ + + if ((ret = db_env_create(&pEnv->bdb_DB_ENV, 0)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, +@@ -2013,9 +2019,9 @@ bdb_pre_close(struct ldbminfo *li) + return; + + /* first, see if there are any housekeeping threads running */ +- 
PR_Lock(pEnv->bdb_thread_count_lock); ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); + threadcount = pEnv->bdb_thread_count; +- PR_Unlock(pEnv->bdb_thread_count_lock); ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); + + if (threadcount) { + PRIntervalTime cvwaittime = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL * 100); +@@ -2023,7 +2029,7 @@ bdb_pre_close(struct ldbminfo *li) + /* Print handy-dandy log message */ + slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "Waiting for %d database threads to stop\n", + threadcount); +- PR_Lock(pEnv->bdb_thread_count_lock); ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); + /* Tell them to stop - we wait until the last possible moment to invoke + this. If we do this much sooner than this, we could find ourselves + in a situation where the threads see the stop_threads and exit before +@@ -2034,6 +2040,7 @@ bdb_pre_close(struct ldbminfo *li) + conf->bdb_stop_threads = 1; + /* Wait for them to exit */ + while (pEnv->bdb_thread_count > 0) { ++ struct timespec current_time = {0}; + PRIntervalTime before = PR_IntervalNow(); + /* There are 3 ways to wake up from this WaitCondVar: + 1) The last database thread exits and calls NotifyCondVar - thread_count +@@ -2041,7 +2048,9 @@ bdb_pre_close(struct ldbminfo *li) + 2) Timeout - in this case, thread_count will be > 0 - bad + 3) A bad error occurs - bad - will be reported as a timeout + */ +- PR_WaitCondVar(pEnv->bdb_thread_count_cv, cvwaittime); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += DBLAYER_SLEEP_INTERVAL / 10; /* cvwaittime but in seconds */ ++ pthread_cond_timedwait(&pEnv->bdb_thread_count_cv, &pEnv->bdb_thread_count_lock, ¤t_time); + if (pEnv->bdb_thread_count > 0) { + /* still at least 1 thread running - see if this is a timeout */ + if ((PR_IntervalNow() - before) >= cvwaittime) { +@@ -2052,7 +2061,7 @@ bdb_pre_close(struct ldbminfo *li) + /* else just a spurious interrupt */ + } + } +- PR_Unlock(pEnv->bdb_thread_count_lock); ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock); + if (timedout) { + slapi_log_err(SLAPI_LOG_ERR, + "bdb_pre_close", "Timeout after [%d] milliseconds; leave %d database thread(s)...\n", +@@ -2645,12 +2654,12 @@ bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool + and new parent for any nested transactions created */ + if (use_lock && log_flush_thread) { + int txn_id = new_txn.back_txn_txn->id(new_txn.back_txn_txn); +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + txn_in_progress_count++; + slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_begin_ext", + "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", + trans_batch_count, txn_in_progress_count, txn_id); +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } + dblayer_push_pvt_txn(&new_txn); + if (txn) { +@@ -2717,11 +2726,11 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) + if ((conf->bdb_durable_transactions) && use_lock) { + if (trans_batch_limit > 0 && log_flush_thread) { + /* let log_flush thread do the flushing */ +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + txn_batch_slot = trans_batch_count++; + txn_log_flush_pending[txn_batch_slot] = txn_id; +- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before notify): batchcount: %d, " +- "txn_in_progress: %d, curr_txn: %x\n", ++ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", ++ "(before notify): batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", + 
trans_batch_count, + txn_in_progress_count, txn_id); + /* +@@ -2731,8 +2740,9 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) + * - there is no other outstanding txn + */ + if (trans_batch_count > trans_batch_limit || +- trans_batch_count == txn_in_progress_count) { +- PR_NotifyCondVar(sync_txn_log_do_flush); ++ trans_batch_count == txn_in_progress_count) ++ { ++ pthread_cond_signal(&sync_txn_log_do_flush); + } + /* + * We need to wait until the txn has been flushed before continuing +@@ -2740,14 +2750,14 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) + * PR_WaitCondvar releases and reaquires the lock + */ + while (txn_log_flush_pending[txn_batch_slot] == txn_id) { +- PR_WaitCondVar(sync_txn_log_flush_done, PR_INTERVAL_NO_TIMEOUT); ++ pthread_cond_wait(&sync_txn_log_flush_done, &sync_txn_log_flush); + } + txn_in_progress_count--; +- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before unlock): batchcount: %d, " +- "txn_in_progress: %d, curr_txn %x\n", ++ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", ++ "(before unlock): batchcount: %d, txn_in_progress: %d, curr_txn %x\n", + trans_batch_count, + txn_in_progress_count, txn_id); +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } else if (trans_batch_limit == FLUSH_REMOTEOFF) { /* user remotely turned batching off */ + LOG_FLUSH(pEnv->bdb_DB_ENV, 0); + } +@@ -2799,9 +2809,9 @@ bdb_txn_abort(struct ldbminfo *li, back_txn *txn, PRBool use_lock) + int txn_id = db_txn->id(db_txn); + bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; + if (use_lock && log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + txn_in_progress_count--; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_abort_ext", + "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", + trans_batch_count, txn_in_progress_count, txn_id); +@@ -3420,11 +3430,18 @@ bdb_start_log_flush_thread(struct ldbminfo *li) + int max_threads = config_get_threadnumber(); + + if ((BDB_CONFIG(li)->bdb_durable_transactions) && +- (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) { ++ (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) ++ { + /* initialize the synchronization objects for the log_flush and worker threads */ +- sync_txn_log_flush = PR_NewLock(); +- sync_txn_log_flush_done = PR_NewCondVar(sync_txn_log_flush); +- sync_txn_log_do_flush = PR_NewCondVar(sync_txn_log_flush); ++ pthread_condattr_t condAttr; ++ ++ pthread_mutex_init(&sync_txn_log_flush, NULL); ++ pthread_condattr_init(&condAttr); ++ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); ++ pthread_cond_init(&sync_txn_log_do_flush, &condAttr); ++ pthread_cond_init(&sync_txn_log_flush_done, NULL); ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ ++ + txn_log_flush_pending = (int *)slapi_ch_malloc(max_threads * sizeof(int)); + log_flush_thread = PR_TRUE; + if (NULL == PR_CreateThread(PR_USER_THREAD, +@@ -3451,7 +3468,7 @@ bdb_start_log_flush_thread(struct ldbminfo *li) + static int + log_flush_threadmain(void *param) + { +- PRIntervalTime interval_wait, interval_flush, interval_def; ++ PRIntervalTime interval_flush, interval_def; + PRIntervalTime last_flush = 0; + int i; + int do_flush = 0; +@@ -3464,7 +3481,6 @@ log_flush_threadmain(void *param) + INCR_THREAD_COUNT(pEnv); + + interval_flush = PR_MillisecondsToInterval(trans_batch_txn_min_sleep); +- 
interval_wait = PR_MillisecondsToInterval(trans_batch_txn_max_sleep); + interval_def = PR_MillisecondsToInterval(300); /*used while no txn or txn batching */ + /* LK this is only needed if online change of + * of txn config is supported ??? +@@ -3473,10 +3489,10 @@ log_flush_threadmain(void *param) + if (BDB_CONFIG(li)->bdb_enable_transactions) { + if (trans_batch_limit > 0) { + /* synchronize flushing thread with workers */ +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + if (!log_flush_thread) { + /* batch transactions was disabled while waiting for the lock */ +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + break; + } + slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(in loop): batchcount: %d, " +@@ -3502,20 +3518,31 @@ log_flush_threadmain(void *param) + slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(before notify): batchcount: %d, " + "txn_in_progress: %d\n", + trans_batch_count, txn_in_progress_count); +- PR_NotifyAllCondVar(sync_txn_log_flush_done); ++ pthread_cond_broadcast(&sync_txn_log_flush_done); + } + /* wait until flushing conditions are met */ + while ((trans_batch_count == 0) || +- (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) { ++ (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) ++ { ++ struct timespec current_time = {0}; ++ /* convert milliseconds to nano seconds */ ++ int32_t nano_sec_sleep = trans_batch_txn_max_sleep * 1000000; + if (BDB_CONFIG(li)->bdb_stop_threads) + break; + if (PR_IntervalNow() - last_flush > interval_flush) { + do_flush = 1; + break; + } +- PR_WaitCondVar(sync_txn_log_do_flush, interval_wait); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ if (current_time.tv_nsec + nano_sec_sleep > 1000000000) { ++ /* nano sec will overflow, just bump the seconds */ ++ current_time.tv_sec++; ++ } else { ++ current_time.tv_nsec += nano_sec_sleep; ++ } ++ pthread_cond_timedwait(&sync_txn_log_do_flush, &sync_txn_log_flush, ¤t_time); + } +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(wakeup): batchcount: %d, " + "txn_in_progress: %d\n", + trans_batch_count, txn_in_progress_count); +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +index bf00d2e9a..6bb04d21a 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -18,10 +18,10 @@ typedef struct bdb_db_env + Slapi_RWLock *bdb_env_lock; + int bdb_openflags; + int bdb_priv_flags; +- PRLock *bdb_thread_count_lock; /* lock for thread_count_cv */ +- PRCondVar *bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ +- PRInt32 bdb_thread_count; /* Tells us how many threads are running, +- * used to figure out when they're all stopped */ ++ pthread_mutex_t bdb_thread_count_lock; /* lock for thread_count_cv */ ++ pthread_cond_t bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ ++ PRInt32 bdb_thread_count; /* Tells us how many threads are running, ++ * used to figure out when they're all stopped */ + } bdb_db_env; + + /* structure which holds our stuff */ +diff --git a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h +index db77a602b..bfa74ed49 100644 +--- a/ldap/servers/slapd/back-ldbm/import.h ++++ b/ldap/servers/slapd/back-ldbm/import.h +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -130,8 +130,8 @@ typedef struct + char **exclude_subtrees; /* list of subtrees to NOT import */ + Fifo fifo; /* entry fifo for indexing */ + char *task_status; /* transient state info for the end-user */ +- PRLock *wire_lock; /* lock for serializing wire imports */ +- PRCondVar *wire_cv; /* ... and ordering the startup */ ++ pthread_mutex_t wire_lock; /* lock for serializing wire imports */ ++ pthread_cond_t wire_cv; /* ... and ordering the startup */ + PRThread *main_thread; /* for FRI: import_main() thread id */ + int encrypt; + Slapi_Value *usn_value; /* entryusn for import */ +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index 88b7dc3be..1883fe711 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -64,8 +64,10 @@ struct Slapi_work_q + + static struct Slapi_work_q *head_work_q = NULL; /* global work queue head */ + static struct Slapi_work_q *tail_work_q = NULL; /* global work queue tail */ +-static PRLock *work_q_lock = NULL; /* protects head_conn_q and tail_conn_q */ +-static PRCondVar *work_q_cv; /* used by operation threads to wait for work - when there is a conn in the queue waiting to be processed */ ++static pthread_mutex_t work_q_lock; /* protects head_conn_q and tail_conn_q */ ++static pthread_cond_t work_q_cv; /* used by operation threads to wait for work - ++ * when there is a conn in the queue waiting ++ * to be processed */ + static PRInt32 work_q_size; /* size of conn_q */ + static PRInt32 work_q_size_max; /* high water mark of work_q_size */ + #define WORK_Q_EMPTY (work_q_size == 0) +@@ -409,7 +411,7 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib + + /* initialize the remaining connection fields */ + conn->c_ldapversion = LDAP_VERSION3; +- conn->c_starttime = slapi_current_utc_time(); ++ conn->c_starttime = slapi_current_rel_time_t(); + conn->c_idlesince = conn->c_starttime; + conn->c_flags = is_SSL ? 
CONN_FLAG_SSL : 0; + conn->c_authtype = slapi_ch_strdup(SLAPD_AUTH_NONE); +@@ -424,32 +426,40 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib + void + init_op_threads() + { +- int i; +- PRErrorCode errorCode; +- int max_threads = config_get_threadnumber(); +- /* Initialize the locks and cv */ ++ pthread_condattr_t condAttr; ++ int32_t max_threads = config_get_threadnumber(); ++ int32_t rc; + +- if ((work_q_lock = PR_NewLock()) == NULL) { +- errorCode = PR_GetError(); +- slapi_log_err(SLAPI_LOG_ERR, +- "init_op_threads", "PR_NewLock failed for work_q_lock, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- errorCode, slapd_pr_strerror(errorCode)); ++ /* Initialize the locks and cv */ ++ if ((rc = pthread_mutex_init(&work_q_lock, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); + exit(-1); + } +- +- if ((work_q_cv = PR_NewCondVar(work_q_lock)) == NULL) { +- errorCode = PR_GetError(); +- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_NewCondVar failed for work_q_cv, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- errorCode, slapd_pr_strerror(errorCode)); ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(-1); ++ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(-1); ++ } else if ((rc = pthread_cond_init(&work_q_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot create new condition variable. 
error %d (%s)\n", ++ rc, strerror(rc)); + exit(-1); + } ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ + + work_q_stack = PR_CreateStack("connection_work_q"); +- + op_stack = PR_CreateStack("connection_operation"); + + /* start the operation threads */ +- for (i = 0; i < max_threads; i++) { ++ for (size_t i = 0; i < max_threads; i++) { + PR_SetConcurrency(4); + if (PR_CreateThread(PR_USER_THREAD, + (VFP)(void *)connection_threadmain, NULL, +@@ -457,7 +467,8 @@ init_op_threads() + PR_UNJOINABLE_THREAD, + SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) { + int prerr = PR_GetError(); +- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", + prerr, slapd_pr_strerror(prerr)); + } else { + g_incr_active_threadcnt(); +@@ -949,16 +960,23 @@ connection_make_new_pb(Slapi_PBlock *pb, Connection *conn) + } + + int +-connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval) ++connection_wait_for_new_work(Slapi_PBlock *pb, int32_t interval) + { + int ret = CONN_FOUND_WORK_TO_DO; + work_q_item *wqitem = NULL; + struct Slapi_op_stack *op_stack_obj = NULL; + +- PR_Lock(work_q_lock); ++ pthread_mutex_lock(&work_q_lock); + + while (!op_shutdown && WORK_Q_EMPTY) { +- PR_WaitCondVar(work_q_cv, interval); ++ if (interval == 0 ) { ++ pthread_cond_wait(&work_q_cv, &work_q_lock); ++ } else { ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += interval; ++ pthread_cond_timedwait(&work_q_cv, &work_q_lock, ¤t_time); ++ } + } + + if (op_shutdown) { +@@ -975,7 +993,7 @@ connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval) + slapi_pblock_set(pb, SLAPI_OPERATION, op_stack_obj->op); + } + +- PR_Unlock(work_q_lock); ++ pthread_mutex_unlock(&work_q_lock); + return ret; + } + +@@ -1353,7 +1371,7 @@ connection_check_activity_level(Connection *conn) + /* store current count in the previous count slot */ + conn->c_private->previous_op_count = current_count; + /* update the last checked time */ +- conn->c_private->previous_count_check_time = slapi_current_utc_time(); ++ conn->c_private->previous_count_check_time = slapi_current_rel_time_t(); + pthread_mutex_unlock(&(conn->c_mutex)); + slapi_log_err(SLAPI_LOG_CONNS, "connection_check_activity_level", "conn %" PRIu64 " activity level = %d\n", conn->c_connid, delta_count); + } +@@ -1463,7 +1481,7 @@ connection_threadmain() + { + Slapi_PBlock *pb = slapi_pblock_new(); + /* wait forever for new pb until one is available or shutdown */ +- PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT; /* PR_SecondsToInterval(10); */ ++ int32_t interval = 0; /* used be 10 seconds */ + Connection *conn = NULL; + Operation *op; + ber_tag_t tag = 0; +@@ -1503,7 +1521,7 @@ connection_threadmain() + + switch (ret) { + case CONN_NOWORK: +- PR_ASSERT(interval != PR_INTERVAL_NO_TIMEOUT); /* this should never happen with PR_INTERVAL_NO_TIMEOUT */ ++ PR_ASSERT(interval != 0); /* this should never happen */ + continue; + case CONN_SHUTDOWN: + slapi_log_err(SLAPI_LOG_TRACE, "connection_threadmain", +@@ -1610,7 +1628,7 @@ connection_threadmain() + conn->c_opsinitiated, conn->c_refcnt, conn->c_flags); + } + +- curtime = slapi_current_utc_time(); ++ curtime = slapi_current_rel_time_t(); + #define DB_PERF_TURBO 1 + #if defined(DB_PERF_TURBO) + /* If it's been a while since we last did it ... 
*/ +@@ -1914,7 +1932,7 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj) + new_work_q->op_stack_obj = op_stack_obj; + new_work_q->next_work_item = NULL; + +- PR_Lock(work_q_lock); ++ pthread_mutex_lock(&work_q_lock); + if (tail_work_q == NULL) { + tail_work_q = new_work_q; + head_work_q = new_work_q; +@@ -1926,8 +1944,8 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj) + if (work_q_size > work_q_size_max) { + work_q_size_max = work_q_size; + } +- PR_NotifyCondVar(work_q_cv); /* notify waiters in connection_wait_for_new_work */ +- PR_Unlock(work_q_lock); ++ pthread_cond_signal(&work_q_cv); /* notify waiters in connection_wait_for_new_work */ ++ pthread_mutex_unlock(&work_q_lock); + } + + /* get_work_q(): will get a work_q_item from the beginning of the work queue, return NULL if +@@ -1975,9 +1993,9 @@ op_thread_cleanup() + op_stack_size, work_q_size_max, work_q_stack_size_max); + + PR_AtomicIncrement(&op_shutdown); +- PR_Lock(work_q_lock); +- PR_NotifyAllCondVar(work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */ +- PR_Unlock(work_q_lock); ++ pthread_mutex_lock(&work_q_lock); ++ pthread_cond_broadcast(&work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */ ++ pthread_mutex_unlock(&work_q_lock); + } + + /* do this after all worker threads have terminated */ +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index bfd965263..0071ed86a 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -81,8 +81,9 @@ static int readsignalpipe = SLAPD_INVALID_SOCKET; + #define FDS_SIGNAL_PIPE 0 + + static PRThread *disk_thread_p = NULL; +-static PRCondVar *diskmon_cvar = NULL; +-static PRLock *diskmon_mutex = NULL; ++static pthread_cond_t diskmon_cvar; ++static pthread_mutex_t diskmon_mutex; ++ + void disk_monitoring_stop(void); + + typedef struct listener_info +@@ -441,9 +442,13 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + + while (!g_get_shutdown()) { + if (!first_pass) { +- PR_Lock(diskmon_mutex); +- PR_WaitCondVar(diskmon_cvar, PR_SecondsToInterval(10)); +- PR_Unlock(diskmon_mutex); ++ struct timespec current_time = {0}; ++ ++ pthread_mutex_lock(&diskmon_mutex); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += 10; ++ pthread_cond_timedwait(&diskmon_cvar, &diskmon_mutex, ¤t_time); ++ pthread_mutex_unlock(&diskmon_mutex); + /* + * We need to subtract from disk_space to account for the + * logging we just did, it doesn't hurt if we subtract a +@@ -622,7 +627,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + "Disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). 
" + "Waiting %d minutes for disk space to be cleaned up before shutting slapd down...\n", + dirstr, threshold, (grace_period / 60)); +- start = slapi_current_utc_time(); ++ start = slapi_current_rel_time_t(); + now = start; + while ((now - start) < grace_period) { + if (g_get_shutdown()) { +@@ -685,7 +690,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + immediate_shutdown = 1; + goto cleanup; + } +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + } + + if (ok_now) { +@@ -1005,21 +1010,34 @@ slapd_daemon(daemon_ports_t *ports) + * and the monitoring thread. + */ + if (config_get_disk_monitoring()) { +- if ((diskmon_mutex = PR_NewLock()) == NULL) { ++ pthread_condattr_t condAttr; ++ int rc = 0; ++ ++ if ((rc = pthread_mutex_init(&diskmon_mutex, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", "cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); ++ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); ++ } ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", +- "Cannot create new lock for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- PR_GetError(), slapd_pr_strerror(PR_GetError())); ++ "cannot create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); + g_set_shutdown(SLAPI_SHUTDOWN_EXIT); + } +- if (diskmon_mutex) { +- if ((diskmon_cvar = PR_NewCondVar(diskmon_mutex)) == NULL) { +- slapi_log_err(SLAPI_LOG_EMERG, "slapd_daemon", +- "Cannot create new condition variable for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- PR_GetError(), slapd_pr_strerror(PR_GetError())); +- g_set_shutdown(SLAPI_SHUTDOWN_EXIT); +- } ++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", ++ "cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); ++ } ++ if ((rc = pthread_cond_init(&diskmon_cvar, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", ++ "cannot create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); + } +- if (diskmon_mutex && diskmon_cvar) { ++ pthread_condattr_destroy(&condAttr); ++ if (rc == 0) { + disk_thread_p = PR_CreateThread(PR_SYSTEM_THREAD, + (VFP)(void *)disk_monitoring_thread, NULL, + PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, +@@ -1508,7 +1526,7 @@ static void + handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused))) + { + Connection *c; +- time_t curtime = slapi_current_utc_time(); ++ time_t curtime = slapi_current_rel_time_t(); + + #if LDAP_ERROR_LOGGING + if (slapd_ldap_debug & LDAP_DEBUG_CONNS) { +@@ -2884,8 +2902,8 @@ void + disk_monitoring_stop(void) + { + if (disk_thread_p) { +- PR_Lock(diskmon_mutex); +- PR_NotifyCondVar(diskmon_cvar); +- PR_Unlock(diskmon_mutex); ++ pthread_mutex_lock(&diskmon_mutex); ++ pthread_cond_signal(&diskmon_cvar); ++ pthread_mutex_unlock(&diskmon_mutex); + } + } +diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c +index a491acd0a..e1900724f 100644 +--- a/ldap/servers/slapd/eventq.c ++++ b/ldap/servers/slapd/eventq.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -52,8 +52,8 @@ typedef struct _slapi_eq_context + */ + typedef struct _event_queue + { +- PRLock *eq_lock; +- PRCondVar *eq_cv; ++ pthread_mutex_t eq_lock; ++ pthread_cond_t eq_cv; + slapi_eq_context *eq_queue; + } event_queue; + +@@ -74,8 +74,8 @@ static PRThread *eq_loop_tid = NULL; + static int eq_running = 0; + static int eq_stopped = 0; + static int eq_initialized = 0; +-PRLock *ss_lock = NULL; +-PRCondVar *ss_cv = NULL; ++static pthread_mutex_t ss_lock; ++static pthread_cond_t ss_cv; + PRCallOnceType init_once = {0}; + + /* Forward declarations */ +@@ -170,7 +170,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) + + PR_ASSERT(eq_initialized); + if (!eq_stopped) { +- PR_Lock(eq->eq_lock); ++ pthread_mutex_lock(&(eq->eq_lock)); + p = &(eq->eq_queue); + while (!found && *p != NULL) { + if ((*p)->ec_id == ctx) { +@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) + p = &((*p)->ec_next); + } + } +- PR_Unlock(eq->eq_lock); ++ pthread_mutex_unlock(&(eq->eq_lock)); + } + slapi_log_err(SLAPI_LOG_HOUSE, NULL, + "cancellation of event id %p requested: %s\n", +@@ -223,7 +223,7 @@ eq_enqueue(slapi_eq_context *newec) + slapi_eq_context **p; + + PR_ASSERT(NULL != newec); +- PR_Lock(eq->eq_lock); ++ pthread_mutex_lock(&(eq->eq_lock)); + /* Insert in order (sorted by start time) in the list */ + for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { + if ((*p)->ec_when > newec->ec_when) { +@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec) + newec->ec_next = NULL; + } + *p = newec; +- PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */ +- PR_Unlock(eq->eq_lock); ++ pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */ ++ pthread_mutex_unlock(&(eq->eq_lock)); + } + + +@@ -251,12 +251,12 @@ eq_dequeue(time_t now) + { + slapi_eq_context *retptr = NULL; + +- PR_Lock(eq->eq_lock); ++ pthread_mutex_lock(&(eq->eq_lock)); + if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { + retptr = eq->eq_queue; + eq->eq_queue = retptr->ec_next; + } +- PR_Unlock(eq->eq_lock); ++ pthread_mutex_unlock(&(eq->eq_lock)); + return retptr; + } + +@@ -271,7 +271,7 @@ static void + eq_call_all(void) + { + slapi_eq_context *p; +- time_t curtime = slapi_current_utc_time(); ++ time_t curtime = slapi_current_rel_time_t(); + + while ((p = eq_dequeue(curtime)) != NULL) { + /* Call the scheduled function */ +@@ -299,34 +299,35 @@ static void + eq_loop(void *arg __attribute__((unused))) + { + while (eq_running) { +- time_t curtime = slapi_current_utc_time(); +- PRIntervalTime timeout; ++ time_t curtime = slapi_current_rel_time_t(); + int until; +- PR_Lock(eq->eq_lock); ++ ++ pthread_mutex_lock(&(eq->eq_lock)); + while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { + if (!eq_running) { +- PR_Unlock(eq->eq_lock); ++ pthread_mutex_unlock(&(eq->eq_lock)); + goto bye; + } + /* Compute new timeout */ + if (NULL != eq->eq_queue) { ++ struct timespec current_time = slapi_current_rel_time_hr(); + until = eq->eq_queue->ec_when - curtime; +- timeout = PR_SecondsToInterval(until); ++ current_time.tv_sec += until; ++ pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, ¤t_time); + } else { +- timeout = PR_INTERVAL_NO_TIMEOUT; ++ pthread_cond_wait(&eq->eq_cv, &eq->eq_lock); + } +- PR_WaitCondVar(eq->eq_cv, timeout); +- curtime = slapi_current_utc_time(); ++ curtime = slapi_current_rel_time_t(); + } + /* There is some work to do */ +- PR_Unlock(eq->eq_lock); ++ pthread_mutex_unlock(&(eq->eq_lock)); + eq_call_all(); + } + bye: + eq_stopped = 1; +- PR_Lock(ss_lock); +- 
PR_NotifyAllCondVar(ss_cv); +- PR_Unlock(ss_lock); ++ pthread_mutex_lock(&ss_lock); ++ pthread_cond_broadcast(&ss_cv); ++ pthread_mutex_unlock(&ss_lock); + } + + +@@ -336,23 +337,50 @@ bye: + static PRStatus + eq_create(void) + { +- PR_ASSERT(NULL == eq->eq_lock); +- if ((eq->eq_lock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); ++ pthread_condattr_t condAttr; ++ int rc = 0; ++ ++ /* Init the eventq mutex and cond var */ ++ if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ "Failed to create lock: error %d (%s)\n", ++ rc, strerror(rc)); + exit(1); + } +- if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ "Failed to create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); + exit(1); + } +- if ((ss_lock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); ++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ "Cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); + exit(1); + } +- if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); ++ if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ "Failed to create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); + exit(1); + } ++ ++ /* Init the "ss" mutex and condition var */ ++ if (pthread_mutex_init(&ss_lock, NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ "Failed to create ss lock: error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ "Failed to create new ss condition variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ ++ + eq->eq_queue = NULL; + eq_initialized = 1; + return PR_SUCCESS; +@@ -411,7 +439,7 @@ eq_stop() + { + slapi_eq_context *p, *q; + +- if (NULL == eq || NULL == eq->eq_lock) { /* never started */ ++ if (NULL == eq) { /* never started */ + eq_stopped = 1; + return; + } +@@ -423,12 +451,24 @@ eq_stop() + * it acknowledges by setting eq_stopped. + */ + while (!eq_stopped) { +- PR_Lock(eq->eq_lock); +- PR_NotifyAllCondVar(eq->eq_cv); +- PR_Unlock(eq->eq_lock); +- PR_Lock(ss_lock); +- PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100)); +- PR_Unlock(ss_lock); ++ struct timespec current_time = {0}; ++ ++ pthread_mutex_lock(&(eq->eq_lock)); ++ pthread_cond_broadcast(&(eq->eq_cv)); ++ pthread_mutex_unlock(&(eq->eq_lock)); ++ ++ pthread_mutex_lock(&ss_lock); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ if (current_time.tv_nsec + 100000000 > 1000000000) { ++ /* nanoseconds will overflow, adjust the seconds and nanoseconds */ ++ current_time.tv_sec++; ++ /* Add the remainder to nanoseconds */ ++ current_time.tv_nsec = (current_time.tv_nsec + 100000000) - 1000000000; ++ } else { ++ current_time.tv_nsec += 100000000; /* 100 ms */ ++ } ++ pthread_cond_timedwait(&ss_cv, &ss_lock, ¤t_time); ++ pthread_mutex_unlock(&ss_lock); + } + (void)PR_JoinThread(eq_loop_tid); + /* +@@ -438,7 +478,7 @@ eq_stop() + * The downside is that the event queue can't be stopped and restarted + * easily. 
+ */ +- PR_Lock(eq->eq_lock); ++ pthread_mutex_lock(&(eq->eq_lock)); + p = eq->eq_queue; + while (p != NULL) { + q = p->ec_next; +@@ -449,7 +489,7 @@ eq_stop() + */ + p = q; + } +- PR_Unlock(eq->eq_lock); ++ pthread_mutex_unlock(&(eq->eq_lock)); + slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); + } + +@@ -463,17 +503,17 @@ slapi_eq_get_arg(Slapi_Eq_Context ctx) + + PR_ASSERT(eq_initialized); + if (eq && !eq_stopped) { +- PR_Lock(eq->eq_lock); ++ pthread_mutex_lock(&(eq->eq_lock)); + p = &(eq->eq_queue); + while (p && *p != NULL) { + if ((*p)->ec_id == ctx) { +- PR_Unlock(eq->eq_lock); ++ pthread_mutex_unlock(&(eq->eq_lock)); + return (*p)->ec_arg; + } else { + p = &((*p)->ec_next); + } + } +- PR_Unlock(eq->eq_lock); ++ pthread_mutex_unlock(&(eq->eq_lock)); + } + return NULL; + } +diff --git a/ldap/servers/slapd/house.c b/ldap/servers/slapd/house.c +index ff139a4a5..ac1d94f26 100644 +--- a/ldap/servers/slapd/house.c ++++ b/ldap/servers/slapd/house.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -23,17 +23,15 @@ + #define SLAPD_HOUSEKEEPING_INTERVAL 30 /* seconds */ + + static PRThread *housekeeping_tid = NULL; +-static PRLock *housekeeping_mutex = NULL; +-static PRCondVar *housekeeping_cvar = NULL; ++static pthread_mutex_t housekeeping_mutex; ++static pthread_cond_t housekeeping_cvar; + + + static void + housecleaning(void *cur_time __attribute__((unused))) + { +- int interval; +- +- interval = PR_SecondsToInterval(SLAPD_HOUSEKEEPING_INTERVAL); + while (!g_get_shutdown()) { ++ struct timespec current_time = {0}; + /* + * Looks simple, but could potentially take a long time. + */ +@@ -42,9 +40,15 @@ housecleaning(void *cur_time __attribute__((unused))) + if (g_get_shutdown()) { + break; + } +- PR_Lock(housekeeping_mutex); +- PR_WaitCondVar(housekeeping_cvar, interval); +- PR_Unlock(housekeeping_mutex); ++ ++ /* get the current monotonic time and add our interval */ ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += SLAPD_HOUSEKEEPING_INTERVAL; ++ ++ /* Now we wait... */ ++ pthread_mutex_lock(&housekeeping_mutex); ++ pthread_cond_timedwait(&housekeeping_cvar, &housekeeping_mutex, ¤t_time); ++ pthread_mutex_unlock(&housekeeping_mutex); + } + } + +@@ -52,20 +56,31 @@ PRThread * + housekeeping_start(time_t cur_time, void *arg __attribute__((unused))) + { + static time_t thread_start_time; ++ pthread_condattr_t condAttr; ++ int rc = 0; + + if (housekeeping_tid) { + return housekeeping_tid; + } + +- if ((housekeeping_mutex = PR_NewLock()) == NULL) { ++ if ((rc = pthread_mutex_init(&housekeeping_mutex, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", ++ "housekeeping cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); ++ } else if ((rc = pthread_condattr_init(&condAttr)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", +- "housekeeping cannot create new lock. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- PR_GetError(), slapd_pr_strerror(PR_GetError())); +- } else if ((housekeeping_cvar = PR_NewCondVar(housekeeping_mutex)) == NULL) { ++ "housekeeping cannot create new condition attribute variable. 
error %d (%s)\n", ++ rc, strerror(rc)); ++ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", +- "housekeeping cannot create new condition variable. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- PR_GetError(), slapd_pr_strerror(PR_GetError())); ++ "housekeeping cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ } else if ((rc = pthread_cond_init(&housekeeping_cvar, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", ++ "housekeeping cannot create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); + } else { ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ + thread_start_time = cur_time; + if ((housekeeping_tid = PR_CreateThread(PR_USER_THREAD, + (VFP)housecleaning, (void *)&thread_start_time, +@@ -84,9 +99,16 @@ void + housekeeping_stop() + { + if (housekeeping_tid) { +- PR_Lock(housekeeping_mutex); +- PR_NotifyCondVar(housekeeping_cvar); +- PR_Unlock(housekeeping_mutex); ++ /* Notify the thread */ ++ pthread_mutex_lock(&housekeeping_mutex); ++ pthread_cond_signal(&housekeeping_cvar); ++ pthread_mutex_unlock(&housekeeping_mutex); ++ ++ /* Wait for the thread to finish */ + (void)PR_JoinThread(housekeeping_tid); ++ ++ /* Clean it all up */ ++ pthread_mutex_destroy(&housekeeping_mutex); ++ pthread_cond_destroy(&housekeeping_cvar); + } + } +diff --git a/ldap/servers/slapd/libmakefile b/ldap/servers/slapd/libmakefile +index b3ecabc29..3559c0104 100644 +--- a/ldap/servers/slapd/libmakefile ++++ b/ldap/servers/slapd/libmakefile +@@ -46,7 +46,7 @@ LIBSLAPD_OBJS=plugin_role.o getfilelist.o libglobs.o log.o ch_malloc.o entry.o p + filter.o filtercmp.o filterentry.o operation.o schemaparse.o pw.o \ + backend.o defbackend.o ava.o charray.o regex.o \ + str2filter.o dynalib.o plugin.o plugin_syntax.o plugin_mr.o \ +- slapi2nspr.o rwlock.o control.o plugin_internal_op.o \ ++ slapi2runtime.o rwlock.o control.o plugin_internal_op.o \ + result.o pw_retry.o agtmmap.o referral.o snmp_collator.o util.o \ + dse.o errormap.o computed.o match.o fileio.o \ + generation.o localhost.o ssl.o factory.o auditlog.o \ +diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c +index 6820a5d75..c60e6a8ed 100644 +--- a/ldap/servers/slapd/psearch.c ++++ b/ldap/servers/slapd/psearch.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -59,10 +59,10 @@ typedef struct _psearch + */ + typedef struct _psearch_list + { +- Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ +- PSearch *pl_head; /* Head of list */ +- PRLock *pl_cvarlock; /* Lock for cvar */ +- PRCondVar *pl_cvar; /* ps threads sleep on this */ ++ Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ ++ PSearch *pl_head; /* Head of list */ ++ pthread_mutex_t pl_cvarlock; /* Lock for cvar */ ++ pthread_cond_t pl_cvar; /* ps threads sleep on this */ + } PSearch_List; + + /* +@@ -101,21 +101,26 @@ void + ps_init_psearch_system() + { + if (!PS_IS_INITIALIZED()) { ++ int32_t rc = 0; ++ + psearch_list = (PSearch_List *)slapi_ch_calloc(1, sizeof(PSearch_List)); + if ((psearch_list->pl_rwlock = slapi_new_rwlock()) == NULL) { + slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot initialize lock structure. 
" + "The server is terminating.\n"); + exit(-1); + } +- if ((psearch_list->pl_cvarlock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new lock. " +- "The server is terminating.\n"); +- exit(-1); ++ ++ if ((rc = pthread_mutex_init(&(psearch_list->pl_cvarlock), NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", ++ "Cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); + } +- if ((psearch_list->pl_cvar = PR_NewCondVar(psearch_list->pl_cvarlock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new condition variable. " +- "The server is terminating.\n"); +- exit(-1); ++ if ((rc = pthread_cond_init(&(psearch_list->pl_cvar), NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", ++ "housekeeping cannot create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); + } + psearch_list->pl_head = NULL; + } +@@ -288,7 +293,7 @@ ps_send_results(void *arg) + pb_conn->c_connid, pb_op ? pb_op->o_opid : -1); + } + +- PR_Lock(psearch_list->pl_cvarlock); ++ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); + + while ((conn_acq_flag == 0) && slapi_atomic_load_64(&(ps->ps_complete), __ATOMIC_ACQUIRE) == 0) { + /* Check for an abandoned operation */ +@@ -300,7 +305,7 @@ ps_send_results(void *arg) + } + if (NULL == ps->ps_eq_head) { + /* Nothing to do */ +- PR_WaitCondVar(psearch_list->pl_cvar, PR_INTERVAL_NO_TIMEOUT); ++ pthread_cond_wait(&(psearch_list->pl_cvar), &(psearch_list->pl_cvarlock)); + } else { + /* dequeue the item */ + int attrsonly; +@@ -330,17 +335,17 @@ ps_send_results(void *arg) + } + + /* +- * Send the result. Since send_ldap_search_entry can block for +- * up to 30 minutes, we relinquish all locks before calling it. +- */ +- PR_Unlock(psearch_list->pl_cvarlock); ++ * Send the result. Since send_ldap_search_entry can block for ++ * up to 30 minutes, we relinquish all locks before calling it. ++ */ ++ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); + + /* +- * The entry is in the right scope and matches the filter +- * but we need to redo the filter test here to check access +- * controls. See the comments at the slapi_filter_test() +- * call in ps_service_persistent_searches(). +- */ ++ * The entry is in the right scope and matches the filter ++ * but we need to redo the filter test here to check access ++ * controls. See the comments at the slapi_filter_test() ++ * call in ps_service_persistent_searches(). 
++ */ + slapi_pblock_get(ps->ps_pblock, SLAPI_SEARCH_FILTER, &f); + + /* See if the entry meets the filter and ACL criteria */ +@@ -358,13 +363,13 @@ ps_send_results(void *arg) + } + } + +- PR_Lock(psearch_list->pl_cvarlock); ++ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); + + /* Deallocate our wrapper for this entry */ + pe_ch_free(&peq); + } + } +- PR_Unlock(psearch_list->pl_cvarlock); ++ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); + ps_remove(ps); + + /* indicate the end of search */ +@@ -474,9 +479,9 @@ void + ps_wakeup_all() + { + if (PS_IS_INITIALIZED()) { +- PR_Lock(psearch_list->pl_cvarlock); +- PR_NotifyAllCondVar(psearch_list->pl_cvar); +- PR_Unlock(psearch_list->pl_cvarlock); ++ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); ++ pthread_cond_broadcast(&(psearch_list->pl_cvar)); ++ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); + } + } + +diff --git a/ldap/servers/slapd/regex.c b/ldap/servers/slapd/regex.c +index 97249a4c5..a17c354fd 100644 +--- a/ldap/servers/slapd/regex.c ++++ b/ldap/servers/slapd/regex.c +@@ -72,7 +72,7 @@ int + slapi_re_exec(Slapi_Regex *re_handle, const char *subject, time_t time_up) + { + int rc; +- time_t curtime = slapi_current_utc_time(); ++ time_t curtime = slapi_current_rel_time_t(); + + if (NULL == re_handle || NULL == re_handle->re_pcre || NULL == subject) { + return LDAP_PARAM_ERROR; +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index f9ac8b46c..55ded5eb8 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -6086,6 +6086,7 @@ Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex); + void slapi_destroy_condvar(Slapi_CondVar *cvar); + int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout); + int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all); ++int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout); + + /** + * Creates a new read/write lock +@@ -6777,6 +6778,12 @@ struct timespec slapi_current_time_hr(void); + * \return timespec of the current monotonic time. + */ + struct timespec slapi_current_rel_time_hr(void); ++/** ++ * Returns the current system time as a hr clock ++ * ++ * \return time_t of the current monotonic time. ++ */ ++time_t slapi_current_rel_time_t(void); + /** + * Returns the current system time as a hr clock in UTC timezone. + * This clock adjusts with ntp steps, and should NOT be +diff --git a/ldap/servers/slapd/slapi2nspr.c b/ldap/servers/slapd/slapi2runtime.c +similarity index 69% +rename from ldap/servers/slapd/slapi2nspr.c +rename to ldap/servers/slapd/slapi2runtime.c +index 232d1599e..85dc4c9a8 100644 +--- a/ldap/servers/slapd/slapi2nspr.c ++++ b/ldap/servers/slapd/slapi2runtime.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -14,6 +14,8 @@ + /* + * slapi2nspr.c - expose a subset of the NSPR20/21 API to SLAPI plugin writers + * ++ * Also include slapi2pthread functions ++ * + */ + + #include "slap.h" +@@ -44,47 +46,50 @@ + Slapi_Mutex * + slapi_new_mutex(void) + { +- return ((Slapi_Mutex *)PR_NewLock()); ++ pthread_mutex_t *new_mutex = (pthread_mutex_t *)slapi_ch_calloc(1, sizeof(pthread_mutex_t)); ++ pthread_mutex_init(new_mutex, NULL); ++ return ((Slapi_Mutex *)new_mutex); + } + +- + /* + * Function: slapi_destroy_mutex +- * Description: behaves just like PR_DestroyLock(). ++ * Description: behaves just like pthread_mutex_destroy(). + */ + void + slapi_destroy_mutex(Slapi_Mutex *mutex) + { + if (mutex != NULL) { +- PR_DestroyLock((PRLock *)mutex); ++ pthread_mutex_destroy((pthread_mutex_t *)mutex); ++ slapi_ch_free((void **)&mutex); + } + } + + + /* + * Function: slapi_lock_mutex +- * Description: behaves just like PR_Lock(). ++ * Description: behaves just like pthread_mutex_lock(). + */ +-void ++inline void __attribute__((always_inline)) + slapi_lock_mutex(Slapi_Mutex *mutex) + { + if (mutex != NULL) { +- PR_Lock((PRLock *)mutex); ++ pthread_mutex_lock((pthread_mutex_t *)mutex); + } + } + + + /* + * Function: slapi_unlock_mutex +- * Description: behaves just like PR_Unlock(). ++ * Description: behaves just like pthread_mutex_unlock(). + * Returns: + * non-zero if mutex was successfully unlocked. + * 0 if mutex is NULL or is not locked by the calling thread. + */ +-int ++inline int __attribute__((always_inline)) + slapi_unlock_mutex(Slapi_Mutex *mutex) + { +- if (mutex == NULL || PR_Unlock((PRLock *)mutex) == PR_FAILURE) { ++ PR_ASSERT(mutex != NULL); ++ if (mutex == NULL || pthread_mutex_unlock((pthread_mutex_t *)mutex) != 0) { + return (0); + } else { + return (1); +@@ -98,13 +103,18 @@ slapi_unlock_mutex(Slapi_Mutex *mutex) + * Returns: pointer to a new condition variable (NULL if one can't be created). 
+ */ + Slapi_CondVar * +-slapi_new_condvar(Slapi_Mutex *mutex) ++slapi_new_condvar(Slapi_Mutex *mutex __attribute__((unused))) + { +- if (mutex == NULL) { +- return (NULL); +- } ++ pthread_cond_t *new_cv = (pthread_cond_t *)slapi_ch_calloc(1, sizeof(pthread_cond_t)); ++ pthread_condattr_t condAttr; ++ ++ pthread_condattr_init(&condAttr); ++ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); ++ pthread_cond_init(new_cv, &condAttr); ++ /* Done with the cond attr, it's safe to destroy it */ ++ pthread_condattr_destroy(&condAttr); + +- return ((Slapi_CondVar *)PR_NewCondVar((PRLock *)mutex)); ++ return (Slapi_CondVar *)new_cv; + } + + +@@ -116,7 +126,8 @@ void + slapi_destroy_condvar(Slapi_CondVar *cvar) + { + if (cvar != NULL) { +- PR_DestroyCondVar((PRCondVar *)cvar); ++ pthread_cond_destroy((pthread_cond_t *)cvar); ++ slapi_ch_free((void **)&cvar); + } + } + +@@ -134,23 +145,35 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) + int + slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) + { +- PRIntervalTime prit; ++ /* deprecated in favor of slapi_wait_condvar_pt() which requires that the ++ * mutex be passed in */ ++ return (0); ++} ++ ++int ++slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout) ++{ ++ int32_t rc = 1; + + if (cvar == NULL) { +- return (0); ++ return 0; + } + + if (timeout == NULL) { +- prit = PR_INTERVAL_NO_TIMEOUT; ++ rc = pthread_cond_wait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex); + } else { +- prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec); ++ struct timespec current_time = {0}; ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += (timeout->tv_sec + PR_MicrosecondsToInterval(timeout->tv_usec)); ++ rc = pthread_cond_timedwait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex, ¤t_time); + } + +- if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) { +- return (0); ++ if (rc != 0) { ++ /* something went wrong */ ++ return 0; + } + +- return (1); ++ return 1; /* success */ + } + + +@@ -166,19 +189,19 @@ slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) + int + slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all) + { +- PRStatus prrc; ++ int32_t rc; + + if (cvar == NULL) { +- return (0); ++ return 0; + } + + if (notify_all) { +- prrc = PR_NotifyAllCondVar((PRCondVar *)cvar); ++ rc = pthread_cond_broadcast((pthread_cond_t *)cvar); + } else { +- prrc = PR_NotifyCondVar((PRCondVar *)cvar); ++ rc = pthread_cond_signal((pthread_cond_t *)cvar); + } + +- return (prrc == PR_SUCCESS ? 1 : 0); ++ return (rc == 0 ? 
1 : 0); + } + + Slapi_RWLock * +@@ -236,7 +259,7 @@ slapi_destroy_rwlock(Slapi_RWLock *rwlock) + } + } + +-int ++inline int __attribute__((always_inline)) + slapi_rwlock_rdlock(Slapi_RWLock *rwlock) + { + int ret = 0; +@@ -252,7 +275,7 @@ slapi_rwlock_rdlock(Slapi_RWLock *rwlock) + return ret; + } + +-int ++inline int __attribute__((always_inline)) + slapi_rwlock_wrlock(Slapi_RWLock *rwlock) + { + int ret = 0; +@@ -268,7 +291,7 @@ slapi_rwlock_wrlock(Slapi_RWLock *rwlock) + return ret; + } + +-int ++inline int __attribute__((always_inline)) + slapi_rwlock_unlock(Slapi_RWLock *rwlock) + { + int ret = 0; +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 806077a16..26f281cba 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -380,16 +380,14 @@ slapi_task_status_changed(Slapi_Task *task) + Slapi_PBlock *pb = slapi_pblock_new(); + Slapi_Entry *e; + int ttl; +- time_t expire; + + if ((e = get_internal_entry(pb, task->task_dn))) { + ttl = atoi(slapi_fetch_attr(e, "ttl", DEFAULT_TTL)); + if (ttl > (24*3600)) + ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ +- expire = time(NULL) + ttl; + task->task_flags |= SLAPI_TASK_DESTROYING; + /* queue an event to destroy the state info */ +- slapi_eq_once(destroy_task, (void *)task, expire); ++ slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); + } + slapi_free_search_results_internal(pb); + slapi_pblock_destroy(pb); +diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c +index 545538404..0406c3689 100644 +--- a/ldap/servers/slapd/time.c ++++ b/ldap/servers/slapd/time.c +@@ -107,6 +107,14 @@ slapi_current_rel_time_hr(void) + return now; + } + ++time_t ++slapi_current_rel_time_t(void) ++{ ++ struct timespec now = {0}; ++ clock_gettime(CLOCK_MONOTONIC, &now); ++ return now.tv_sec; ++} ++ + struct timespec + slapi_current_utc_time_hr(void) + { +@@ -292,7 +300,7 @@ slapi_timer_result + slapi_timespec_expire_check(struct timespec *expire) + { + /* +- * Check this first, as it makes no timeout virutally free. ++ * Check this first, as it makes no timeout virtually free. + */ + if (expire->tv_sec == 0 && expire->tv_nsec == 0) { + return TIMER_CONTINUE; +-- +2.26.2 + diff --git a/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch b/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch new file mode 100644 index 0000000..66a40e8 --- /dev/null +++ b/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch @@ -0,0 +1,1748 @@ +From 69af412d42acccac660037e1f4026a6a6717634c Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 17 Dec 2020 15:25:42 -0500 +Subject: [PATCH 2/2] Issue 4384 - Separate eventq into REALTIME and MONOTONIC + +Description: The recent changes to the eventq "when" time changed + internally from REALTIME to MONOTONIC, and this broke + the API. Create a new API for MONOTONIC clocks, and + keep the original API intact for REALTIME clocks. + +Relates: https://github.com/389ds/389-ds-base/issues/4384 + +Reviewed by: firstyear(Thanks!) 
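
[Editor's note - illustration only, not part of the patch.] The description above hinges on the difference between wall-clock (REALTIME) and monotonic deadlines for pthread_cond_timedwait(): the deadline is absolute and is measured against whatever clock the condition variable was created with, so with the default CLOCK_REALTIME an ntp step can fire the wait early or stall it, while CLOCK_MONOTONIC only moves forward. The standalone sketch below (arbitrary names, no 389-ds code) shows the pattern the converted call sites rely on: bind the condvar to CLOCK_MONOTONIC at init time and compute "now + interval" from that same clock.

    /* Sketch: a condvar bound to CLOCK_MONOTONIC with a relative 2s timeout.
     * Build with: cc sketch.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv;

    int
    main(void)
    {
        pthread_condattr_t attr;
        struct timespec deadline = {0};
        int rc;

        /* Bind the condvar to the monotonic clock before first use. */
        pthread_condattr_init(&attr);
        pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        pthread_cond_init(&cv, &attr);
        pthread_condattr_destroy(&attr); /* no longer needed */

        pthread_mutex_lock(&lock);
        /* The deadline must come from the same clock: "now + interval". */
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 2;
        /* Nobody signals cv here, so this returns ETIMEDOUT after ~2s,
         * regardless of any wall-clock adjustment in the meantime. */
        rc = pthread_cond_timedwait(&cv, &lock, &deadline);
        pthread_mutex_unlock(&lock);

        printf("pthread_cond_timedwait returned %d\n", rc);
        pthread_cond_destroy(&cv);
        pthread_mutex_destroy(&lock);
        return 0;
    }

As the diff below suggests, keeping the original slapi_eq_* API on REALTIME while adding *_rel variants (driven by slapi_current_rel_time_t()) for monotonic callers lets existing plugins that pass wall-clock times keep working while internal callers switch to relative time.
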
+--- + Makefile.am | 1 + + docs/slapi.doxy.in | 1 - + ldap/servers/plugins/chainingdb/cb_instance.c | 6 +- + ldap/servers/plugins/dna/dna.c | 4 +- + .../plugins/replication/repl5_backoff.c | 12 +- + .../plugins/replication/repl5_connection.c | 10 +- + .../plugins/replication/repl5_mtnode_ext.c | 4 +- + .../plugins/replication/repl5_replica.c | 24 +- + .../plugins/replication/repl5_schedule.c | 4 +- + .../plugins/replication/windows_connection.c | 12 +- + .../replication/windows_inc_protocol.c | 7 +- + ldap/servers/plugins/retrocl/retrocl_trim.c | 10 +- + ldap/servers/slapd/daemon.c | 3 +- + ldap/servers/slapd/eventq-deprecated.c | 483 ++++++++++++++++++ + ldap/servers/slapd/eventq.c | 236 ++++----- + ldap/servers/slapd/main.c | 18 +- + ldap/servers/slapd/proto-slap.h | 6 +- + ldap/servers/slapd/slapi-plugin.h | 62 ++- + ldap/servers/slapd/slapi2runtime.c | 23 +- + ldap/servers/slapd/snmp_collator.c | 7 +- + ldap/servers/slapd/task.c | 2 +- + ldap/servers/slapd/uuid.c | 3 +- + 22 files changed, 750 insertions(+), 188 deletions(-) + create mode 100644 ldap/servers/slapd/eventq-deprecated.c + +diff --git a/Makefile.am b/Makefile.am +index f7bf1c44c..ece1ad41a 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1408,6 +1408,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + ldap/servers/slapd/entrywsi.c \ + ldap/servers/slapd/errormap.c \ + ldap/servers/slapd/eventq.c \ ++ ldap/servers/slapd/eventq-deprecated.c \ + ldap/servers/slapd/factory.c \ + ldap/servers/slapd/features.c \ + ldap/servers/slapd/fileio.c \ +diff --git a/docs/slapi.doxy.in b/docs/slapi.doxy.in +index b1e4810ab..1cafc50ce 100644 +--- a/docs/slapi.doxy.in ++++ b/docs/slapi.doxy.in +@@ -759,7 +759,6 @@ WARN_LOGFILE = + # Note: If this tag is empty the current directory is searched. + + INPUT = src/libsds/include/sds.h \ +- docs/job-safety.md \ + # ldap/servers/slapd/slapi-plugin.h \ + + # This tag can be used to specify the character encoding of the source files +diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c +index bc1864c1a..7fd85deb0 100644 +--- a/ldap/servers/plugins/chainingdb/cb_instance.c ++++ b/ldap/servers/plugins/chainingdb/cb_instance.c +@@ -217,7 +217,7 @@ cb_instance_free(cb_backend_instance *inst) + slapi_rwlock_wrlock(inst->rwl_config_lock); + + if (inst->eq_ctx != NULL) { +- slapi_eq_cancel(inst->eq_ctx); ++ slapi_eq_cancel_rel(inst->eq_ctx); + inst->eq_ctx = NULL; + } + +@@ -1947,8 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), + * we can't call recursively into the DSE to do more adds, they'll + * silently fail. instead, schedule the adds to happen in 1 second. 
+ */ +- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, +- slapi_current_rel_time_t() + 1); ++ inst->eq_ctx = slapi_eq_once_rel(cb_instance_add_monitor_later, (void *)inst, ++ slapi_current_rel_time_t() + 1); + } + + /* Get the list of operational attrs defined in the schema */ +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index 1cb54580b..b46edfcbb 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -688,7 +688,7 @@ dna_close(Slapi_PBlock *pb __attribute__((unused))) + slapi_log_err(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, + "--> dna_close\n"); + +- slapi_eq_cancel(eq_ctx); ++ slapi_eq_cancel_rel(eq_ctx); + dna_delete_config(NULL); + slapi_ch_free((void **)&dna_global_config); + slapi_destroy_rwlock(g_dna_cache_lock); +@@ -908,7 +908,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) + * starting up would cause the change to not + * get changelogged. */ + now = slapi_current_rel_time_t(); +- eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); ++ eq_ctx = slapi_eq_once_rel(dna_update_config_event, NULL, now + 30); + } else { + dna_update_config_event(0, NULL); + } +diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c +index 40ec75dd7..8c851beb2 100644 +--- a/ldap/servers/plugins/replication/repl5_backoff.c ++++ b/ldap/servers/plugins/replication/repl5_backoff.c +@@ -99,7 +99,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) + bt->callback_arg = callback_data; + /* Cancel any pending events in the event queue */ + if (NULL != bt->pending_event) { +- slapi_eq_cancel(bt->pending_event); ++ slapi_eq_cancel_rel(bt->pending_event); + bt->pending_event = NULL; + } + /* Compute the first fire time */ +@@ -112,8 +112,8 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) + /* Schedule the callback */ + bt->last_fire_time = slapi_current_rel_time_t(); + return_value = bt->last_fire_time + bt->next_interval; +- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, +- return_value); ++ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, ++ return_value); + PR_Unlock(bt->lock); + return return_value; + } +@@ -159,8 +159,8 @@ backoff_step(Backoff_Timer *bt) + /* Schedule the callback, if any */ + bt->last_fire_time += previous_interval; + return_value = bt->last_fire_time + bt->next_interval; +- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, +- return_value); ++ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, ++ return_value); + } + PR_Unlock(bt->lock); + return return_value; +@@ -196,7 +196,7 @@ backoff_delete(Backoff_Timer **btp) + PR_Lock(bt->lock); + /* Cancel any pending events in the event queue */ + if (NULL != bt->pending_event) { +- slapi_eq_cancel(bt->pending_event); ++ slapi_eq_cancel_rel(bt->pending_event); + } + PR_Unlock(bt->lock); + PR_DestroyLock(bt->lock); +diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c +index bc9ca424b..2dd74f9e7 100644 +--- a/ldap/servers/plugins/replication/repl5_connection.c ++++ b/ldap/servers/plugins/replication/repl5_connection.c +@@ -272,7 +272,7 @@ conn_delete(Repl_Connection *conn) + PR_ASSERT(NULL != conn); + PR_Lock(conn->lock); + if (conn->linger_active) { +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + /* Event was found and 
cancelled. Destroy the connection object. */ + destroy_it = PR_TRUE; + } else { +@@ -961,7 +961,7 @@ conn_cancel_linger(Repl_Connection *conn) + "conn_cancel_linger - %s - Canceling linger on the connection\n", + agmt_get_long_name(conn->agmt)); + conn->linger_active = PR_FALSE; +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + conn->refcnt--; + } + conn->linger_event = NULL; +@@ -1030,7 +1030,7 @@ conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + } else { + conn->linger_active = PR_TRUE; +- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); ++ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); + conn->status = STATUS_LINGERING; + } + PR_Unlock(conn->lock); +@@ -1990,7 +1990,7 @@ repl5_start_debug_timeout(int *setlevel) + Slapi_Eq_Context eqctx = 0; + if (s_debug_timeout && s_debug_level) { + time_t now = slapi_current_rel_time_t(); +- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, ++ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, + s_debug_timeout + now); + } + return eqctx; +@@ -2002,7 +2002,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) + char buf[20]; + + if (eqctx && !*setlevel) { +- (void)slapi_eq_cancel(eqctx); ++ (void)slapi_eq_cancel_rel(eqctx); + } + + if (s_debug_timeout && s_debug_level && *setlevel) { +diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +index 82e230958..2967a47f8 100644 +--- a/ldap/servers/plugins/replication/repl5_mtnode_ext.c ++++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +@@ -82,8 +82,8 @@ multimaster_mtnode_construct_replicas() + } + } + /* Wait a few seconds for everything to startup before resuming any replication tasks */ +- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), +- slapi_current_rel_time_t() + 5); ++ slapi_eq_once_rel(replica_check_for_tasks, (void *)replica_get_root(r), ++ slapi_current_rel_time_t() + 5); + } + } + } +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index c1d376c72..7102e0606 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -231,17 +231,17 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, + /* ONREPL - the state update can occur before the entry is added to the DIT. + In that case the updated would fail but nothing bad would happen. The next + scheduled update would save the state */ +- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + + if (r->tombstone_reap_interval > 0) { + /* + * Reap Tombstone should be started some time after the plugin started. + * This will allow the server to fully start before consuming resources. 
+ */ +- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, +- slapi_current_rel_time_t() + r->tombstone_reap_interval, +- 1000 * r->tombstone_reap_interval); ++ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, ++ slapi_current_rel_time_t() + r->tombstone_reap_interval, ++ 1000 * r->tombstone_reap_interval); + } + + done: +@@ -303,12 +303,12 @@ replica_destroy(void **arg) + */ + + if (r->repl_eqcxt_rs) { +- slapi_eq_cancel(r->repl_eqcxt_rs); ++ slapi_eq_cancel_rel(r->repl_eqcxt_rs); + r->repl_eqcxt_rs = NULL; + } + + if (r->repl_eqcxt_tr) { +- slapi_eq_cancel(r->repl_eqcxt_tr); ++ slapi_eq_cancel_rel(r->repl_eqcxt_tr); + r->repl_eqcxt_tr = NULL; + } + +@@ -1511,14 +1511,14 @@ replica_set_enabled(Replica *r, PRBool enable) + if (enable) { + if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ + { +- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + } + } else /* disable */ + { + if (r->repl_eqcxt_rs) /* event is still registerd */ + { +- slapi_eq_cancel(r->repl_eqcxt_rs); ++ slapi_eq_cancel_rel(r->repl_eqcxt_rs); + r->repl_eqcxt_rs = NULL; + } + } +@@ -3628,7 +3628,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) + if (interval > 0 && r->repl_eqcxt_tr && r->tombstone_reap_interval != interval) { + int found; + +- found = slapi_eq_cancel(r->repl_eqcxt_tr); ++ found = slapi_eq_cancel_rel(r->repl_eqcxt_tr); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", + r->tombstone_reap_interval, (found ? 
"cancelled" : "not found")); +@@ -3636,7 +3636,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) + } + r->tombstone_reap_interval = interval; + if (interval > 0 && r->repl_eqcxt_tr == NULL) { +- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, ++ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, + slapi_current_rel_time_t() + r->tombstone_reap_interval, + 1000 * r->tombstone_reap_interval); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, +diff --git a/ldap/servers/plugins/replication/repl5_schedule.c b/ldap/servers/plugins/replication/repl5_schedule.c +index 9539f4031..ca42df561 100644 +--- a/ldap/servers/plugins/replication/repl5_schedule.c ++++ b/ldap/servers/plugins/replication/repl5_schedule.c +@@ -550,7 +550,7 @@ schedule_window_state_change_event(Schedule *sch) + wakeup_time = PRTime2time_t(tm); + + /* schedule the event */ +- sch->pending_event = slapi_eq_once(window_state_changed, sch, wakeup_time); ++ sch->pending_event = slapi_eq_once_rel(window_state_changed, sch, wakeup_time); + + timestr = get_timestring(&wakeup_time); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: Update window will %s at %s\n", +@@ -593,7 +593,7 @@ static void + unschedule_window_state_change_event(Schedule *sch) + { + if (sch->pending_event) { +- slapi_eq_cancel(sch->pending_event); ++ slapi_eq_cancel_rel(sch->pending_event); + sch->pending_event = NULL; + } + } +diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c +index ce0662544..5eca5fad1 100644 +--- a/ldap/servers/plugins/replication/windows_connection.c ++++ b/ldap/servers/plugins/replication/windows_connection.c +@@ -204,7 +204,7 @@ windows_conn_delete(Repl_Connection *conn) + PR_ASSERT(NULL != conn); + PR_Lock(conn->lock); + if (conn->linger_active) { +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + /* Event was found and cancelled. Destroy the connection object. 
*/ + PR_Unlock(conn->lock); + destroy_it = PR_TRUE; +@@ -1052,7 +1052,7 @@ windows_conn_cancel_linger(Repl_Connection *conn) + "windows_conn_cancel_linger - %s: Cancelling linger on the connection\n", + agmt_get_long_name(conn->agmt)); + conn->linger_active = PR_FALSE; +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + conn->refcnt--; + } + conn->linger_event = NULL; +@@ -1129,7 +1129,7 @@ windows_conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + } else { + conn->linger_active = PR_TRUE; +- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); ++ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); + conn->status = STATUS_LINGERING; + } + PR_Unlock(conn->lock); +@@ -1822,8 +1822,8 @@ repl5_start_debug_timeout(int *setlevel) + + if (s_debug_timeout && s_debug_level) { + time_t now = time(NULL); +- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, +- s_debug_timeout + now); ++ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, ++ s_debug_timeout + now); + } + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= repl5_start_debug_timeout\n"); + return eqctx; +@@ -1837,7 +1837,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> repl5_stop_debug_timeout\n"); + + if (eqctx && !*setlevel) { +- (void)slapi_eq_cancel(eqctx); ++ (void)slapi_eq_cancel_rel(eqctx); + } + + if (s_debug_timeout && s_debug_level && *setlevel) { +diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c +index 3d548e5ed..c07a8180a 100644 +--- a/ldap/servers/plugins/replication/windows_inc_protocol.c ++++ b/ldap/servers/plugins/replication/windows_inc_protocol.c +@@ -132,7 +132,7 @@ windows_inc_delete(Private_Repl_Protocol **prpp) + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_inc_delete\n"); + /* First, stop the protocol if it isn't already stopped */ + /* Then, delete all resources used by the protocol */ +- rc = slapi_eq_cancel(dirsync); ++ rc = slapi_eq_cancel_rel(dirsync); + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_delete - dirsync: %p, rval: %d\n", dirsync, rc); + /* if backoff is set, delete it (from EQ, as well) */ +@@ -324,12 +324,13 @@ windows_inc_run(Private_Repl_Protocol *prp) + if (interval != current_interval) { + current_interval = interval; + if (dirsync) { +- int rc = slapi_eq_cancel(dirsync); ++ int rc = slapi_eq_cancel_rel(dirsync); + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_run - Cancelled dirsync: %p, rval: %d\n", + dirsync, rc); + } +- dirsync = slapi_eq_repeat(periodic_dirsync, (void *)prp, (time_t)0, interval); ++ dirsync = slapi_eq_repeat_rel(periodic_dirsync, (void *)prp, ++ slapi_current_rel_time_t(), interval); + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_run - New dirsync: %p\n", dirsync); + } +diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c +index a3e16c4e1..12a395210 100644 +--- a/ldap/servers/plugins/retrocl/retrocl_trim.c ++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c +@@ -460,10 +460,10 @@ retrocl_init_trimming(void) + ts.ts_s_initialized = 1; + retrocl_trimming = 1; + +- retrocl_trim_ctx = slapi_eq_repeat(retrocl_housekeeping, +- NULL, (time_t)0, +- /* in milliseconds */ +- trim_interval * 1000); 
++ retrocl_trim_ctx = slapi_eq_repeat_rel(retrocl_housekeeping, ++ NULL, (time_t)0, ++ /* in milliseconds */ ++ trim_interval * 1000); + } + + /* +@@ -487,7 +487,7 @@ retrocl_stop_trimming(void) + */ + retrocl_trimming = 0; + if (retrocl_trim_ctx) { +- slapi_eq_cancel(retrocl_trim_ctx); ++ slapi_eq_cancel_rel(retrocl_trim_ctx); + retrocl_trim_ctx = NULL; + } + PR_DestroyLock(ts.ts_s_trim_mutex); +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 0071ed86a..7681e88ea 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1240,7 +1240,8 @@ slapd_daemon(daemon_ports_t *ports) + slapi_log_err(SLAPI_LOG_TRACE, "slapd_daemon", + "slapd shutting down - waiting for backends to close down\n"); + +- eq_stop(); ++ eq_stop(); /* deprecated */ ++ eq_stop_rel(); + if (!in_referral_mode) { + task_shutdown(); + uniqueIDGenCleanup(); +diff --git a/ldap/servers/slapd/eventq-deprecated.c b/ldap/servers/slapd/eventq-deprecated.c +new file mode 100644 +index 000000000..71a7bf8f5 +--- /dev/null ++++ b/ldap/servers/slapd/eventq-deprecated.c +@@ -0,0 +1,483 @@ ++/** BEGIN COPYRIGHT BLOCK ++ * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. ++ * Copyright (C) 2020 Red Hat, Inc. ++ * All rights reserved. ++ * ++ * License: GPL (version 3 or any later version). ++ * See LICENSE for details. ++ * END COPYRIGHT BLOCK **/ ++ ++#ifdef HAVE_CONFIG_H ++#include ++#endif ++ ++ ++/* ******************************************************** ++eventq-deprecated.c - Event queue/scheduling system. ++ ++There are 3 publicly-accessible entry points: ++ ++slapi_eq_once(): cause an event to happen exactly once ++slapi_eq_repeat(): cause an event to happen repeatedly ++slapi_eq_cancel(): cancel a pending event ++ ++There is also an initialization point which must be ++called by the server to initialize the event queue system: ++eq_start(), and an entry point used to shut down the system: ++eq_stop(). ++ ++These functions are now deprecated in favor of the functions ++in eventq.c which use MONOTONIC clocks instead of REALTIME ++clocks. ++*********************************************************** */ ++ ++#include "slap.h" ++#include "prlock.h" ++#include "prcvar.h" ++#include "prinit.h" ++ ++/* ++ * Private definition of slapi_eq_context. Only this ++ * module (eventq.c) should know about the layout of ++ * this structure. ++ */ ++typedef struct _slapi_eq_context ++{ ++ time_t ec_when; ++ time_t ec_interval; ++ slapi_eq_fn_t ec_fn; ++ void *ec_arg; ++ Slapi_Eq_Context ec_id; ++ struct _slapi_eq_context *ec_next; ++} slapi_eq_context; ++ ++/* ++ * Definition of the event queue. ++ */ ++typedef struct _event_queue ++{ ++ PRLock *eq_lock; ++ PRCondVar *eq_cv; ++ slapi_eq_context *eq_queue; ++} event_queue; ++ ++/* ++ * The event queue itself. 
++ */ ++static event_queue eqs = {0}; ++static event_queue *eq = &eqs; ++ ++/* ++ * Thread ID of the main thread loop ++ */ ++static PRThread *eq_loop_tid = NULL; ++ ++/* ++ * Flags used to control startup/shutdown of the event queue ++ */ ++static int eq_running = 0; ++static int eq_stopped = 0; ++static int eq_initialized = 0; ++PRLock *ss_lock = NULL; ++PRCondVar *ss_cv = NULL; ++PRCallOnceType init_once = {0}; ++ ++/* Forward declarations */ ++static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); ++static void eq_enqueue(slapi_eq_context *newec); ++static slapi_eq_context *eq_dequeue(time_t now); ++static PRStatus eq_create(void); ++ ++ ++/* ******************************************************** */ ++ ++ ++/* ++ * slapi_eq_once: cause an event to happen exactly once. ++ * ++ * Arguments: ++ * fn: the function to call ++ * arg: an argument to pass to the called function ++ * when: the time that the function should be called ++ * Returns: ++ * slapi_eq_context - a handle to an opaque object which ++ * the caller can use to refer to this particular scheduled ++ * event. ++ */ ++Slapi_Eq_Context ++slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) ++{ ++ slapi_eq_context *tmp; ++ PR_ASSERT(eq_initialized); ++ if (!eq_stopped) { ++ ++ Slapi_Eq_Context id; ++ ++ tmp = eq_new(fn, arg, when, 0UL); ++ id = tmp->ec_id; ++ ++ eq_enqueue(tmp); ++ ++ /* After this point, may have */ ++ /* been freed, depending on the thread */ ++ /* scheduling. Too bad */ ++ ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "added one-time event id %p at time %ld\n", ++ id, when); ++ return (id); ++ } ++ return NULL; /* JCM - Not sure if this should be 0 or something else. */ ++} ++ ++ ++/* ++ * slapi_eq_repeat: cause an event to happen repeatedly. ++ * ++ * Arguments: ++ * fn: the function to call ++ * arg: an argument to pass to the called function ++ * when: the time that the function should first be called ++ * interval: the amount of time (in milliseconds) between ++ * successive calls to the function ++ * Returns: ++ * slapi_eq_context - a handle to an opaque object which ++ * the caller can use to refer to this particular scheduled ++ */ ++Slapi_Eq_Context ++slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++{ ++ slapi_eq_context *tmp; ++ PR_ASSERT(eq_initialized); ++ if (!eq_stopped) { ++ tmp = eq_new(fn, arg, when, interval); ++ eq_enqueue(tmp); ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "added repeating event id %p at time %ld, interval %lu\n", ++ tmp->ec_id, when, interval); ++ return (tmp->ec_id); ++ } ++ return NULL; /* JCM - Not sure if this should be 0 or something else. */ ++} ++ ++ ++/* ++ * slapi_eq_cancel: cancel a pending event. ++ * Arguments: ++ * ctx: the context of the event which should be de-scheduled ++ */ ++int ++slapi_eq_cancel(Slapi_Eq_Context ctx) ++{ ++ slapi_eq_context **p, *tmp = NULL; ++ int found = 0; ++ ++ PR_ASSERT(eq_initialized); ++ if (!eq_stopped) { ++ PR_Lock(eq->eq_lock); ++ p = &(eq->eq_queue); ++ while (!found && *p != NULL) { ++ if ((*p)->ec_id == ctx) { ++ tmp = *p; ++ *p = (*p)->ec_next; ++ slapi_ch_free((void **)&tmp); ++ found = 1; ++ } else { ++ p = &((*p)->ec_next); ++ } ++ } ++ PR_Unlock(eq->eq_lock); ++ } ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "cancellation of event id %p requested: %s\n", ++ ctx, found ? 
"cancellation succeeded" : "event not found"); ++ return found; ++} ++ ++ ++/* ++ * Construct a new ec structure ++ */ ++static slapi_eq_context * ++eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++{ ++ slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); ++ ++ retptr->ec_fn = fn; ++ retptr->ec_arg = arg; ++ /* ++ * retptr->ec_when = when < now ? now : when; ++ * we used to amke this check, but it make no sense: when queued, if when ++ * has expired, we'll be executed anyway. save the cycles, and just set ++ * ec_when. ++ */ ++ retptr->ec_when = when; ++ retptr->ec_interval = interval == 0UL ? 0UL : (interval + 999) / 1000; ++ retptr->ec_id = (Slapi_Eq_Context)retptr; ++ return retptr; ++} ++ ++ ++/* ++ * Add a new event to the event queue. ++ */ ++static void ++eq_enqueue(slapi_eq_context *newec) ++{ ++ slapi_eq_context **p; ++ ++ PR_ASSERT(NULL != newec); ++ PR_Lock(eq->eq_lock); ++ /* Insert in order (sorted by start time) in the list */ ++ for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { ++ if ((*p)->ec_when > newec->ec_when) { ++ break; ++ } ++ } ++ if (NULL != *p) { ++ newec->ec_next = *p; ++ } else { ++ newec->ec_next = NULL; ++ } ++ *p = newec; ++ PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */ ++ PR_Unlock(eq->eq_lock); ++} ++ ++ ++/* ++ * If there is an event in the queue scheduled at time ++ * or before, dequeue it and return a pointer ++ * to it. Otherwise, return NULL. ++ */ ++static slapi_eq_context * ++eq_dequeue(time_t now) ++{ ++ slapi_eq_context *retptr = NULL; ++ ++ PR_Lock(eq->eq_lock); ++ if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { ++ retptr = eq->eq_queue; ++ eq->eq_queue = retptr->ec_next; ++ } ++ PR_Unlock(eq->eq_lock); ++ return retptr; ++} ++ ++ ++/* ++ * Call all events which are due to run. ++ * Note that if we've missed a schedule ++ * opportunity, we don't try to catch up ++ * by calling the function repeatedly. ++ */ ++static void ++eq_call_all(void) ++{ ++ slapi_eq_context *p; ++ time_t curtime = slapi_current_utc_time(); ++ ++ while ((p = eq_dequeue(curtime)) != NULL) { ++ /* Call the scheduled function */ ++ p->ec_fn(p->ec_when, p->ec_arg); ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "Event id %p called at %ld (scheduled for %ld)\n", ++ p->ec_id, curtime, p->ec_when); ++ if (0UL != p->ec_interval) { ++ /* This is a repeating event. Requeue it. */ ++ do { ++ p->ec_when += p->ec_interval; ++ } while (p->ec_when < curtime); ++ eq_enqueue(p); ++ } else { ++ slapi_ch_free((void **)&p); ++ } ++ } ++} ++ ++ ++/* ++ * The main event queue loop. ++ */ ++static void ++eq_loop(void *arg __attribute__((unused))) ++{ ++ while (eq_running) { ++ time_t curtime = slapi_current_utc_time(); ++ PRIntervalTime timeout; ++ int until; ++ PR_Lock(eq->eq_lock); ++ while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { ++ if (!eq_running) { ++ PR_Unlock(eq->eq_lock); ++ goto bye; ++ } ++ /* Compute new timeout */ ++ if (NULL != eq->eq_queue) { ++ until = eq->eq_queue->ec_when - curtime; ++ timeout = PR_SecondsToInterval(until); ++ } else { ++ timeout = PR_INTERVAL_NO_TIMEOUT; ++ } ++ PR_WaitCondVar(eq->eq_cv, timeout); ++ curtime = slapi_current_utc_time(); ++ } ++ /* There is some work to do */ ++ PR_Unlock(eq->eq_lock); ++ eq_call_all(); ++ } ++bye: ++ eq_stopped = 1; ++ PR_Lock(ss_lock); ++ PR_NotifyAllCondVar(ss_cv); ++ PR_Unlock(ss_lock); ++} ++ ++ ++/* ++ * Allocate and initialize the event queue structures. 
++ */ ++static PRStatus ++eq_create(void) ++{ ++ PR_ASSERT(NULL == eq->eq_lock); ++ if ((eq->eq_lock = PR_NewLock()) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); ++ exit(1); ++ } ++ if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); ++ exit(1); ++ } ++ if ((ss_lock = PR_NewLock()) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); ++ exit(1); ++ } ++ if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); ++ exit(1); ++ } ++ eq->eq_queue = NULL; ++ eq_initialized = 1; ++ return PR_SUCCESS; ++} ++ ++ ++/* ++ * eq_start: start the event queue system. ++ * ++ * This should be called exactly once. It will start a ++ * thread which wakes up periodically and schedules events. ++ */ ++void ++eq_start() ++{ ++ PR_ASSERT(eq_initialized); ++ eq_running = 1; ++ if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, ++ NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, ++ SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); ++ exit(1); ++ } ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); ++} ++ ++ ++/* ++ * eq_init: initialize the event queue system. ++ * ++ * This function should be called early in server startup. ++ * Once it has been called, the event queue will queue ++ * events, but will not fire any events. Once all of the ++ * server plugins have been started, the eq_start() ++ * function should be called, and events will then start ++ * to fire. ++ */ ++void ++eq_init() ++{ ++ if (!eq_initialized) { ++ if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); ++ } ++ } ++} ++ ++ ++/* ++ * eq_stop: shut down the event queue system. ++ * Does not return until event queue is fully ++ * shut down. ++ */ ++void ++eq_stop() ++{ ++ slapi_eq_context *p, *q; ++ ++ if (NULL == eq || NULL == eq->eq_lock) { /* never started */ ++ eq_stopped = 1; ++ return; ++ } ++ ++ eq_stopped = 0; ++ eq_running = 0; ++ /* ++ * Signal the eq thread function to stop, and wait until ++ * it acknowledges by setting eq_stopped. ++ */ ++ while (!eq_stopped) { ++ PR_Lock(eq->eq_lock); ++ PR_NotifyAllCondVar(eq->eq_cv); ++ PR_Unlock(eq->eq_lock); ++ PR_Lock(ss_lock); ++ PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100)); ++ PR_Unlock(ss_lock); ++ } ++ (void)PR_JoinThread(eq_loop_tid); ++ /* ++ * XXXggood we don't free the actual event queue data structures. ++ * This is intentional, to allow enqueueing/cancellation of events ++ * even after event queue services have shut down (these are no-ops). ++ * The downside is that the event queue can't be stopped and restarted ++ * easily. ++ */ ++ PR_Lock(eq->eq_lock); ++ p = eq->eq_queue; ++ while (p != NULL) { ++ q = p->ec_next; ++ slapi_ch_free((void **)&p); ++ /* Some ec_arg could get leaked here in shutdown (e.g., replica_name) ++ * This can be fixed by specifying a flag when the context is queued. 
++ * [After 6.2] ++ */ ++ p = q; ++ } ++ PR_Unlock(eq->eq_lock); ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); ++} ++ ++/* ++ * return arg (ec_arg) only if the context is in the event queue ++ */ ++void * ++slapi_eq_get_arg(Slapi_Eq_Context ctx) ++{ ++ slapi_eq_context **p; ++ ++ PR_ASSERT(eq_initialized); ++ if (eq && !eq_stopped) { ++ PR_Lock(eq->eq_lock); ++ p = &(eq->eq_queue); ++ while (p && *p != NULL) { ++ if ((*p)->ec_id == ctx) { ++ PR_Unlock(eq->eq_lock); ++ return (*p)->ec_arg; ++ } else { ++ p = &((*p)->ec_next); ++ } ++ } ++ PR_Unlock(eq->eq_lock); ++ } ++ return NULL; ++} +diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c +index e1900724f..4c39e08cf 100644 +--- a/ldap/servers/slapd/eventq.c ++++ b/ldap/servers/slapd/eventq.c +@@ -17,14 +17,14 @@ eventq.c - Event queue/scheduling system. + + There are 3 publicly-accessible entry points: + +-slapi_eq_once(): cause an event to happen exactly once +-slapi_eq_repeat(): cause an event to happen repeatedly +-slapi_eq_cancel(): cancel a pending event ++slapi_eq_once_rel(): cause an event to happen exactly once ++slapi_eq_repeat_rel(): cause an event to happen repeatedly ++slapi_eq_cancel_rel(): cancel a pending event + + There is also an initialization point which must be + called by the server to initialize the event queue system: +-eq_start(), and an entry point used to shut down the system: +-eq_stop(). ++eq_start_rel(), and an entry point used to shut down the system: ++eq_stop_rel(). + *********************************************************** */ + + #include "slap.h" +@@ -60,36 +60,36 @@ typedef struct _event_queue + /* + * The event queue itself. + */ +-static event_queue eqs = {0}; +-static event_queue *eq = &eqs; ++static event_queue eqs_rel = {0}; ++static event_queue *eq_rel = &eqs_rel; + + /* + * Thread ID of the main thread loop + */ +-static PRThread *eq_loop_tid = NULL; ++static PRThread *eq_loop_rel_tid = NULL; + + /* + * Flags used to control startup/shutdown of the event queue + */ +-static int eq_running = 0; +-static int eq_stopped = 0; +-static int eq_initialized = 0; +-static pthread_mutex_t ss_lock; +-static pthread_cond_t ss_cv; +-PRCallOnceType init_once = {0}; ++static int eq_rel_running = 0; ++static int eq_rel_stopped = 0; ++static int eq_rel_initialized = 0; ++static pthread_mutex_t ss_rel_lock; ++static pthread_cond_t ss_rel_cv; ++PRCallOnceType init_once_rel = {0}; + + /* Forward declarations */ +-static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); +-static void eq_enqueue(slapi_eq_context *newec); +-static slapi_eq_context *eq_dequeue(time_t now); +-static PRStatus eq_create(void); ++static slapi_eq_context *eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); ++static void eq_enqueue_rel(slapi_eq_context *newec); ++static slapi_eq_context *eq_dequeue_rel(time_t now); ++static PRStatus eq_create_rel(void); + + + /* ******************************************************** */ + + + /* +- * slapi_eq_once: cause an event to happen exactly once. ++ * slapi_eq_once_rel: cause an event to happen exactly once. + * + * Arguments: + * fn: the function to call +@@ -101,18 +101,18 @@ static PRStatus eq_create(void); + * event. 
+ */ + Slapi_Eq_Context +-slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) ++slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when) + { + slapi_eq_context *tmp; +- PR_ASSERT(eq_initialized); +- if (!eq_stopped) { ++ PR_ASSERT(eq_rel_initialized); ++ if (!eq_rel_stopped) { + + Slapi_Eq_Context id; + +- tmp = eq_new(fn, arg, when, 0UL); ++ tmp = eq_new_rel(fn, arg, when, 0UL); + id = tmp->ec_id; + +- eq_enqueue(tmp); ++ eq_enqueue_rel(tmp); + + /* After this point, may have */ + /* been freed, depending on the thread */ +@@ -128,7 +128,7 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) + + + /* +- * slapi_eq_repeat: cause an event to happen repeatedly. ++ * slapi_eq_repeat_rel: cause an event to happen repeatedly. + * + * Arguments: + * fn: the function to call +@@ -141,13 +141,13 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) + * the caller can use to refer to this particular scheduled + */ + Slapi_Eq_Context +-slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) + { + slapi_eq_context *tmp; +- PR_ASSERT(eq_initialized); +- if (!eq_stopped) { +- tmp = eq_new(fn, arg, when, interval); +- eq_enqueue(tmp); ++ PR_ASSERT(eq_rel_initialized); ++ if (!eq_rel_stopped) { ++ tmp = eq_new_rel(fn, arg, when, interval); ++ eq_enqueue_rel(tmp); + slapi_log_err(SLAPI_LOG_HOUSE, NULL, + "added repeating event id %p at time %ld, interval %lu\n", + tmp->ec_id, when, interval); +@@ -158,20 +158,20 @@ slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval + + + /* +- * slapi_eq_cancel: cancel a pending event. ++ * slapi_eq_cancel_rel: cancel a pending event. + * Arguments: + * ctx: the context of the event which should be de-scheduled + */ + int +-slapi_eq_cancel(Slapi_Eq_Context ctx) ++slapi_eq_cancel_rel(Slapi_Eq_Context ctx) + { + slapi_eq_context **p, *tmp = NULL; + int found = 0; + +- PR_ASSERT(eq_initialized); +- if (!eq_stopped) { +- pthread_mutex_lock(&(eq->eq_lock)); +- p = &(eq->eq_queue); ++ PR_ASSERT(eq_rel_initialized); ++ if (!eq_rel_stopped) { ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ p = &(eq_rel->eq_queue); + while (!found && *p != NULL) { + if ((*p)->ec_id == ctx) { + tmp = *p; +@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) + p = &((*p)->ec_next); + } + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + } + slapi_log_err(SLAPI_LOG_HOUSE, NULL, + "cancellation of event id %p requested: %s\n", +@@ -195,7 +195,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) + * Construct a new ec structure + */ + static slapi_eq_context * +-eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) + { + slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); + +@@ -218,14 +218,14 @@ eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) + * Add a new event to the event queue. 
+ */ + static void +-eq_enqueue(slapi_eq_context *newec) ++eq_enqueue_rel(slapi_eq_context *newec) + { + slapi_eq_context **p; + + PR_ASSERT(NULL != newec); +- pthread_mutex_lock(&(eq->eq_lock)); ++ pthread_mutex_lock(&(eq_rel->eq_lock)); + /* Insert in order (sorted by start time) in the list */ +- for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { ++ for (p = &(eq_rel->eq_queue); *p != NULL; p = &((*p)->ec_next)) { + if ((*p)->ec_when > newec->ec_when) { + break; + } +@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec) + newec->ec_next = NULL; + } + *p = newec; +- pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */ +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_cond_signal(&(eq_rel->eq_cv)); /* wake up scheduler thread */ ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + } + + +@@ -247,16 +247,16 @@ eq_enqueue(slapi_eq_context *newec) + * to it. Otherwise, return NULL. + */ + static slapi_eq_context * +-eq_dequeue(time_t now) ++eq_dequeue_rel(time_t now) + { + slapi_eq_context *retptr = NULL; + +- pthread_mutex_lock(&(eq->eq_lock)); +- if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { +- retptr = eq->eq_queue; +- eq->eq_queue = retptr->ec_next; ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ if (NULL != eq_rel->eq_queue && eq_rel->eq_queue->ec_when <= now) { ++ retptr = eq_rel->eq_queue; ++ eq_rel->eq_queue = retptr->ec_next; + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + return retptr; + } + +@@ -268,12 +268,12 @@ eq_dequeue(time_t now) + * by calling the function repeatedly. + */ + static void +-eq_call_all(void) ++eq_call_all_rel(void) + { + slapi_eq_context *p; + time_t curtime = slapi_current_rel_time_t(); + +- while ((p = eq_dequeue(curtime)) != NULL) { ++ while ((p = eq_dequeue_rel(curtime)) != NULL) { + /* Call the scheduled function */ + p->ec_fn(p->ec_when, p->ec_arg); + slapi_log_err(SLAPI_LOG_HOUSE, NULL, +@@ -284,7 +284,7 @@ eq_call_all(void) + do { + p->ec_when += p->ec_interval; + } while (p->ec_when < curtime); +- eq_enqueue(p); ++ eq_enqueue_rel(p); + } else { + slapi_ch_free((void **)&p); + } +@@ -296,38 +296,38 @@ eq_call_all(void) + * The main event queue loop. 
+ */ + static void +-eq_loop(void *arg __attribute__((unused))) ++eq_loop_rel(void *arg __attribute__((unused))) + { +- while (eq_running) { ++ while (eq_rel_running) { + time_t curtime = slapi_current_rel_time_t(); + int until; + +- pthread_mutex_lock(&(eq->eq_lock)); +- while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { +- if (!eq_running) { +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ while (!((NULL != eq_rel->eq_queue) && (eq_rel->eq_queue->ec_when <= curtime))) { ++ if (!eq_rel_running) { ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + goto bye; + } + /* Compute new timeout */ +- if (NULL != eq->eq_queue) { ++ if (NULL != eq_rel->eq_queue) { + struct timespec current_time = slapi_current_rel_time_hr(); +- until = eq->eq_queue->ec_when - curtime; ++ until = eq_rel->eq_queue->ec_when - curtime; + current_time.tv_sec += until; +- pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, ¤t_time); ++ pthread_cond_timedwait(&eq_rel->eq_cv, &eq_rel->eq_lock, ¤t_time); + } else { +- pthread_cond_wait(&eq->eq_cv, &eq->eq_lock); ++ pthread_cond_wait(&eq_rel->eq_cv, &eq_rel->eq_lock); + } + curtime = slapi_current_rel_time_t(); + } + /* There is some work to do */ +- pthread_mutex_unlock(&(eq->eq_lock)); +- eq_call_all(); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); ++ eq_call_all_rel(); + } + bye: +- eq_stopped = 1; +- pthread_mutex_lock(&ss_lock); +- pthread_cond_broadcast(&ss_cv); +- pthread_mutex_unlock(&ss_lock); ++ eq_rel_stopped = 1; ++ pthread_mutex_lock(&ss_rel_lock); ++ pthread_cond_broadcast(&ss_rel_cv); ++ pthread_mutex_unlock(&ss_rel_lock); + } + + +@@ -335,73 +335,73 @@ bye: + * Allocate and initialize the event queue structures. + */ + static PRStatus +-eq_create(void) ++eq_create_rel(void) + { + pthread_condattr_t condAttr; + int rc = 0; + + /* Init the eventq mutex and cond var */ +- if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if (pthread_mutex_init(&eq_rel->eq_lock, NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create lock: error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + if ((rc = pthread_condattr_init(&condAttr)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create new condition attribute variable. error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Cannot set condition attr clock. error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } +- if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if ((rc = pthread_cond_init(&eq_rel->eq_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create new condition variable. 
error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + + /* Init the "ss" mutex and condition var */ +- if (pthread_mutex_init(&ss_lock, NULL) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if (pthread_mutex_init(&ss_rel_lock, NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create ss lock: error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } +- if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if ((rc = pthread_cond_init(&ss_rel_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create new ss condition variable. error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + pthread_condattr_destroy(&condAttr); /* no longer needed */ + +- eq->eq_queue = NULL; +- eq_initialized = 1; ++ eq_rel->eq_queue = NULL; ++ eq_rel_initialized = 1; + return PR_SUCCESS; + } + + + /* +- * eq_start: start the event queue system. ++ * eq_start_rel: start the event queue system. + * + * This should be called exactly once. It will start a + * thread which wakes up periodically and schedules events. + */ + void +-eq_start() ++eq_start_rel() + { +- PR_ASSERT(eq_initialized); +- eq_running = 1; +- if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, ++ PR_ASSERT(eq_rel_initialized); ++ eq_rel_running = 1; ++ if ((eq_loop_rel_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop_rel, + NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, + SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); ++ slapi_log_err(SLAPI_LOG_ERR, "eq_start_rel", "eq_loop_rel PR_CreateThread failed\n"); + exit(1); + } + slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); +@@ -409,55 +409,55 @@ eq_start() + + + /* +- * eq_init: initialize the event queue system. ++ * eq_init_rel: initialize the event queue system. + * + * This function should be called early in server startup. + * Once it has been called, the event queue will queue + * events, but will not fire any events. Once all of the +- * server plugins have been started, the eq_start() ++ * server plugins have been started, the eq_start_rel() + * function should be called, and events will then start + * to fire. + */ + void +-eq_init() ++eq_init_rel() + { +- if (!eq_initialized) { +- if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); ++ if (!eq_rel_initialized) { ++ if (PR_SUCCESS != PR_CallOnce(&init_once_rel, eq_create_rel)) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_init_rel", "eq_create_rel failed\n"); + } + } + } + + + /* +- * eq_stop: shut down the event queue system. ++ * eq_stop_rel: shut down the event queue system. + * Does not return until event queue is fully + * shut down. + */ + void +-eq_stop() ++eq_stop_rel() + { + slapi_eq_context *p, *q; + +- if (NULL == eq) { /* never started */ +- eq_stopped = 1; ++ if (NULL == eq_rel) { /* never started */ ++ eq_rel_stopped = 1; + return; + } + +- eq_stopped = 0; +- eq_running = 0; ++ eq_rel_stopped = 0; ++ eq_rel_running = 0; + /* + * Signal the eq thread function to stop, and wait until +- * it acknowledges by setting eq_stopped. ++ * it acknowledges by setting eq_rel_stopped. 
+ */ +- while (!eq_stopped) { ++ while (!eq_rel_stopped) { + struct timespec current_time = {0}; + +- pthread_mutex_lock(&(eq->eq_lock)); +- pthread_cond_broadcast(&(eq->eq_cv)); +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ pthread_cond_broadcast(&(eq_rel->eq_cv)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + +- pthread_mutex_lock(&ss_lock); ++ pthread_mutex_lock(&ss_rel_lock); + clock_gettime(CLOCK_MONOTONIC, ¤t_time); + if (current_time.tv_nsec + 100000000 > 1000000000) { + /* nanoseconds will overflow, adjust the seconds and nanoseconds */ +@@ -467,10 +467,10 @@ eq_stop() + } else { + current_time.tv_nsec += 100000000; /* 100 ms */ + } +- pthread_cond_timedwait(&ss_cv, &ss_lock, ¤t_time); +- pthread_mutex_unlock(&ss_lock); ++ pthread_cond_timedwait(&ss_rel_cv, &ss_rel_lock, ¤t_time); ++ pthread_mutex_unlock(&ss_rel_lock); + } +- (void)PR_JoinThread(eq_loop_tid); ++ (void)PR_JoinThread(eq_loop_rel_tid); + /* + * XXXggood we don't free the actual event queue data structures. + * This is intentional, to allow enqueueing/cancellation of events +@@ -478,8 +478,8 @@ eq_stop() + * The downside is that the event queue can't be stopped and restarted + * easily. + */ +- pthread_mutex_lock(&(eq->eq_lock)); +- p = eq->eq_queue; ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ p = eq_rel->eq_queue; + while (p != NULL) { + q = p->ec_next; + slapi_ch_free((void **)&p); +@@ -489,7 +489,7 @@ eq_stop() + */ + p = q; + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); + } + +@@ -497,23 +497,23 @@ eq_stop() + * return arg (ec_arg) only if the context is in the event queue + */ + void * +-slapi_eq_get_arg(Slapi_Eq_Context ctx) ++slapi_eq_get_arg_rel(Slapi_Eq_Context ctx) + { + slapi_eq_context **p; + +- PR_ASSERT(eq_initialized); +- if (eq && !eq_stopped) { +- pthread_mutex_lock(&(eq->eq_lock)); +- p = &(eq->eq_queue); ++ PR_ASSERT(eq_rel_initialized); ++ if (eq_rel && !eq_rel_stopped) { ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ p = &(eq_rel->eq_queue); + while (p && *p != NULL) { + if ((*p)->ec_id == ctx) { +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + return (*p)->ec_arg; + } else { + p = &((*p)->ec_next); + } + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + } + return NULL; + } +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 104f6826c..dbc8cec15 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -979,7 +979,8 @@ main(int argc, char **argv) + fedse_create_startOK(DSE_FILENAME, DSE_STARTOKFILE, + slapdFrontendConfig->configdir); + +- eq_init(); /* must be done before plugins started */ ++ eq_init(); /* DEPRECATED */ ++ eq_init_rel(); /* must be done before plugins started */ + + /* Start the SNMP collator if counters are enabled. 
*/ + if (config_get_slapi_counters()) { +@@ -1035,7 +1036,8 @@ main(int argc, char **argv) + goto cleanup; + } + +- eq_start(); /* must be done after plugins started */ ++ eq_start(); /* must be done after plugins started - DEPRECATED */ ++ eq_start_rel(); /* must be done after plugins started */ + + #ifdef HPUX10 + /* HPUX linker voodoo */ +@@ -2205,10 +2207,13 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) + */ + plugin_get_plugin_dependencies(repl_plg_name, &plugin_list); + +- eq_init(); /* must be done before plugins started */ ++ eq_init(); /* must be done before plugins started - DEPRECATED */ ++ eq_init_rel(); /* must be done before plugins started */ ++ + ps_init_psearch_system(); /* must come before plugin_startall() */ + plugin_startall(argc, argv, plugin_list); +- eq_start(); /* must be done after plugins started */ ++ eq_start(); /* must be done after plugins started - DEPRECATED*/ ++ eq_start_rel(); /* must be done after plugins started */ + charray_free(plugin_list); + } + +@@ -2263,8 +2268,9 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) + charray_free(mcfg->cmd_line_instance_names); + charray_free(mcfg->db2ldif_include); + if (mcfg->db2ldif_dump_replica) { +- eq_stop(); /* event queue should be shutdown before closing +- all plugins (especailly, replication plugin) */ ++ eq_stop(); /* DEPRECATED*/ ++ eq_stop_rel(); /* event queue should be shutdown before closing ++ all plugins (especially, replication plugin) */ + plugin_closeall(1 /* Close Backends */, 1 /* Close Globals */); + } + return (return_value); +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 3acc24f03..87080dd82 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -1322,7 +1322,6 @@ void factory_destroy_extension(int type, void *object, void *parent, void **exte + /* + * auditlog.c + */ +- + void write_audit_log_entry(Slapi_PBlock *pb); + void auditlog_hide_unhashed_pw(void); + void auditlog_expose_unhashed_pw(void); +@@ -1334,10 +1333,15 @@ void auditfaillog_expose_unhashed_pw(void); + /* + * eventq.c + */ ++void eq_init_rel(void); ++void eq_start_rel(void); ++void eq_stop_rel(void); ++/* Deprecated eventq that uses REALTIME clock instead of MONOTONIC */ + void eq_init(void); + void eq_start(void); + void eq_stop(void); + ++ + /* + * uniqueidgen.c + */ +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 55ded5eb8..f76b86e3c 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -6084,7 +6084,7 @@ void slapi_lock_mutex(Slapi_Mutex *mutex); + int slapi_unlock_mutex(Slapi_Mutex *mutex); + Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex); + void slapi_destroy_condvar(Slapi_CondVar *cvar); +-int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout); ++int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) __attribute__((deprecated)); + int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all); + int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout); + +@@ -8059,24 +8059,24 @@ typedef void (*slapi_eq_fn_t)(time_t when, void *arg); + * + * \param fn The function to call when the event is triggered. + * \param arg An argument to pass to the called function. +- * \param when The time that the function should be called. ++ * \param when The time that the function should be called(MONOTONIC clock). 
+ * + * \return slapi_eq_context + */ +-Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when); ++Slapi_Eq_Context slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when); + + /** + * Cause an event to happen repeatedly. + * + * \param fn The function to call when the vent is triggered. + * \param arg An argument to pass to the called function. +- * \param when The time that the function should be called. ++ * \param when The time that the function should be called(MONOTONIC clock). + * \param interval The amount of time (in milliseconds) between + * successive calls to the function. + * + * \return slapi_eq_context + */ +-Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); ++Slapi_Eq_Context slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); + + /** + * Cause a scheduled event to be canceled. +@@ -8086,7 +8086,7 @@ Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsig + * \return 1 If event was found and canceled. + * \return 0 If event was not found in the queue. + */ +-int slapi_eq_cancel(Slapi_Eq_Context ctx); ++int slapi_eq_cancel_rel(Slapi_Eq_Context ctx); + + /** + * Return the event's argument. +@@ -8095,7 +8095,55 @@ int slapi_eq_cancel(Slapi_Eq_Context ctx); + * + * \return A pointer to the event argument. + */ +-void *slapi_eq_get_arg(Slapi_Eq_Context ctx); ++void *slapi_eq_get_arg_rel(Slapi_Eq_Context ctx); ++ ++/* ++ * These event queue functions are now DEPRECATED as they REALTIME clocks ++ * instead of the preferred MONOTONIC clocks. ++ */ ++ ++/** ++ * Cause an event to happen exactly once. ++ * ++ * \param fn The function to call when the event is triggered. ++ * \param arg An argument to pass to the called function. ++ * \param when The time that the function should be called(REALTIME clock). ++ * ++ * \return slapi_eq_context ++ */ ++Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) __attribute__((deprecated)); ++ ++/** ++ * Cause an event to happen repeatedly. ++ * ++ * \param fn The function to call when the vent is triggered. ++ * \param arg An argument to pass to the called function. ++ * \param when The time that the function should be called(REALTIME clock). ++ * \param interval The amount of time (in milliseconds) between ++ * successive calls to the function. ++ * ++ * \return slapi_eq_context ++ */ ++Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) __attribute__((deprecated)); ++ ++/** ++ * Cause a scheduled event to be canceled. ++ * ++ * \param ctx The event object to cancel ++ * ++ * \return 1 If event was found and canceled. ++ * \return 0 If event was not found in the queue. ++ */ ++int slapi_eq_cancel(Slapi_Eq_Context ctx) __attribute__((deprecated)); ++ ++/** ++ * Return the event's argument. ++ * ++ * \param ctx The event object ++ * ++ * \return A pointer to the event argument. ++ */ ++void *slapi_eq_get_arg(Slapi_Eq_Context ctx) __attribute__((deprecated)); + + /** + * Construct a full path and name of a plugin. 
+diff --git a/ldap/servers/slapd/slapi2runtime.c b/ldap/servers/slapd/slapi2runtime.c +index 85dc4c9a8..53927934a 100644 +--- a/ldap/servers/slapd/slapi2runtime.c ++++ b/ldap/servers/slapd/slapi2runtime.c +@@ -133,7 +133,7 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) + + + /* +- * Function: slapi_wait_condvar ++ * Function: slapi_wait_condvar (DEPRECATED) + * Description: behaves just like PR_WaitCondVar() except timeout is + * in seconds and microseconds instead of PRIntervalTime units. + * If timeout is NULL, this call blocks indefinitely. +@@ -145,9 +145,26 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) + int + slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) + { +- /* deprecated in favor of slapi_wait_condvar_pt() which requires that the ++ /* Deprecated in favor of slapi_wait_condvar_pt() which requires that the + * mutex be passed in */ +- return (0); ++ PRIntervalTime prit; ++ ++ if (cvar == NULL) { ++ return (0); ++ } ++ ++ if (timeout == NULL) { ++ prit = PR_INTERVAL_NO_TIMEOUT; ++ } else { ++ prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec); ++ } ++ ++ if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) { ++ return (0); ++ } ++ ++ return (1); ++ + } + + int +diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c +index 3dd3af657..d760515f4 100644 +--- a/ldap/servers/slapd/snmp_collator.c ++++ b/ldap/servers/slapd/snmp_collator.c +@@ -385,8 +385,9 @@ snmp_collator_start() + snmp_collator_init(); + + /* Arrange to be called back periodically to update the mmap'd stats file. */ +- snmp_eq_ctx = slapi_eq_repeat(snmp_collator_update, NULL, (time_t)0, +- SLAPD_SNMP_UPDATE_INTERVAL); ++ snmp_eq_ctx = slapi_eq_repeat_rel(snmp_collator_update, NULL, ++ slapi_current_rel_time_t(), ++ SLAPD_SNMP_UPDATE_INTERVAL); + return 0; + } + +@@ -411,7 +412,7 @@ snmp_collator_stop() + } + + /* Abort any pending events */ +- slapi_eq_cancel(snmp_eq_ctx); ++ slapi_eq_cancel_rel(snmp_eq_ctx); + snmp_collator_stopped = 1; + + /* acquire the semaphore */ +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 26f281cba..bded287c6 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -387,7 +387,7 @@ slapi_task_status_changed(Slapi_Task *task) + ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ + task->task_flags |= SLAPI_TASK_DESTROYING; + /* queue an event to destroy the state info */ +- slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); ++ slapi_eq_once_rel(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); + } + slapi_free_search_results_internal(pb); + slapi_pblock_destroy(pb); +diff --git a/ldap/servers/slapd/uuid.c b/ldap/servers/slapd/uuid.c +index a8bd6ee6c..31384a544 100644 +--- a/ldap/servers/slapd/uuid.c ++++ b/ldap/servers/slapd/uuid.c +@@ -186,7 +186,8 @@ uuid_init(const char *configDir, const Slapi_DN *configDN, PRBool mtGen) + + /* schedule update task for multithreaded generation */ + if (_state.mtGen) +- slapi_eq_repeat(uuid_update_state, NULL, (time_t)0, UPDATE_INTERVAL); ++ slapi_eq_repeat_rel(uuid_update_state, NULL, slapi_current_rel_time_t(), ++ UPDATE_INTERVAL); + + _state.initialized = PR_TRUE; + return UUID_SUCCESS; +-- +2.26.2 + diff --git a/SOURCES/0032-Backport-tests-from-master-branch-fix-failing-tests-.patch b/SOURCES/0032-Backport-tests-from-master-branch-fix-failing-tests-.patch new file mode 100644 index 0000000..1e49598 --- /dev/null +++ 
b/SOURCES/0032-Backport-tests-from-master-branch-fix-failing-tests-.patch @@ -0,0 +1,4208 @@ +From 0f309fee0e2b337ee333d9ce80a6c64d6f7161ef Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Thu, 12 Nov 2020 17:53:09 +0100 +Subject: [PATCH] Backport tests from master branch, fix failing tests (#4425) + +Relates: #2820 + +Reviewed by: mreynolds (Thanks!) +--- + dirsrvtests/tests/suites/acl/acivattr_test.py | 50 +-- + dirsrvtests/tests/suites/acl/acl_deny_test.py | 10 +- + dirsrvtests/tests/suites/acl/acl_test.py | 26 +- + .../acl/default_aci_allows_self_write.py | 4 +- + dirsrvtests/tests/suites/acl/deladd_test.py | 54 ++-- + .../suites/acl/enhanced_aci_modrnd_test.py | 22 +- + .../suites/acl/globalgroup_part2_test.py | 36 ++- + .../tests/suites/acl/globalgroup_test.py | 16 +- + .../tests/suites/acl/keywords_part2_test.py | 30 +- + dirsrvtests/tests/suites/acl/keywords_test.py | 71 ++--- + dirsrvtests/tests/suites/acl/misc_test.py | 104 +++--- + dirsrvtests/tests/suites/acl/modrdn_test.py | 180 +++++------ + dirsrvtests/tests/suites/acl/roledn_test.py | 4 +- + .../suites/acl/selfdn_permissions_test.py | 23 +- + dirsrvtests/tests/suites/acl/syntax_test.py | 56 ++-- + dirsrvtests/tests/suites/acl/userattr_test.py | 6 +- + .../tests/suites/acl/valueacl_part2_test.py | 107 ++++--- + dirsrvtests/tests/suites/acl/valueacl_test.py | 207 ++++++------ + dirsrvtests/tests/suites/basic/basic_test.py | 23 +- + .../tests/suites/ds_logs/ds_logs_test.py | 301 ++++++++++++++---- + .../filter/rfc3673_all_oper_attrs_test.py | 23 +- + .../suites/mapping_tree/acceptance_test.py | 65 ++++ + .../be_del_and_default_naming_attr_test.py | 17 +- + .../password/pwdPolicy_attribute_test.py | 9 +- + .../suites/replication/changelog_test.py | 6 +- + .../replication/conflict_resolve_test.py | 4 +- + .../tests/suites/replication/rfc2307compat.py | 174 ++++++++++ + dirsrvtests/tests/suites/roles/__init__.py | 3 + + dirsrvtests/tests/suites/roles/basic_test.py | 83 ++--- + .../tests/suites/sasl/regression_test.py | 21 +- + .../tests/suites/syncrepl_plugin/__init__.py | 163 ++++++++++ + .../suites/syncrepl_plugin/basic_test.py | 66 ++-- + .../tests/suites/vlv/regression_test.py | 2 +- + 33 files changed, 1319 insertions(+), 647 deletions(-) + create mode 100644 dirsrvtests/tests/suites/mapping_tree/acceptance_test.py + create mode 100644 dirsrvtests/tests/suites/replication/rfc2307compat.py + create mode 100644 dirsrvtests/tests/suites/roles/__init__.py + create mode 100644 dirsrvtests/tests/suites/syncrepl_plugin/__init__.py + +diff --git a/dirsrvtests/tests/suites/acl/acivattr_test.py b/dirsrvtests/tests/suites/acl/acivattr_test.py +index 35759f36e..d55eea023 100644 +--- a/dirsrvtests/tests/suites/acl/acivattr_test.py ++++ b/dirsrvtests/tests/suites/acl/acivattr_test.py +@@ -174,18 +174,19 @@ LDAPURL_ACI = '(targetattr="*")(version 3.0; acl "url"; allow (all) userdn="ldap + '(ENG_USER, ENG_MANAGER, LDAPURL_ACI)', + ]) + def test_positive(topo, _add_user, aci_of_user, user, entry, aci): +- """ +- :id: ba6d5e9c-786b-11e8-860d-8c16451d917b +- :parametrized: yes +- :setup: server +- :steps: +- 1. Add test entry +- 2. Add ACI +- 3. ACI role should be followed +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed ++ """Positive testing of ACLs ++ ++ :id: ba6d5e9c-786b-11e8-860d-8c16451d917b ++ :parametrized: yes ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. ACI role should be followed ++ :expectedresults: ++ 1. Entry should be added ++ 2. 
Operation should succeed ++ 3. Operation should succeed + """ + # set aci + Domain(topo.standalone, DNBASE).set("aci", aci) +@@ -225,18 +226,19 @@ def test_positive(topo, _add_user, aci_of_user, user, entry, aci): + + ]) + def test_negative(topo, _add_user, aci_of_user, user, entry, aci): +- """ +- :id: c4c887c2-786b-11e8-a328-8c16451d917b +- :parametrized: yes +- :setup: server +- :steps: +- 1. Add test entry +- 2. Add ACI +- 3. ACI role should be followed +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed ++ """Negative testing of ACLs ++ ++ :id: c4c887c2-786b-11e8-a328-8c16451d917b ++ :parametrized: yes ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. ACI role should be followed ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should not succeed + """ + # set aci + Domain(topo.standalone, DNBASE).set("aci", aci) +diff --git a/dirsrvtests/tests/suites/acl/acl_deny_test.py b/dirsrvtests/tests/suites/acl/acl_deny_test.py +index 8ea6cd27b..96d08e9da 100644 +--- a/dirsrvtests/tests/suites/acl/acl_deny_test.py ++++ b/dirsrvtests/tests/suites/acl/acl_deny_test.py +@@ -1,3 +1,11 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# + import logging + import pytest + import os +@@ -5,7 +13,7 @@ import ldap + import time + from lib389._constants import * + from lib389.topologies import topology_st as topo +-from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES ++from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES + from lib389.idm.domain import Domain + + pytestmark = pytest.mark.tier1 +diff --git a/dirsrvtests/tests/suites/acl/acl_test.py b/dirsrvtests/tests/suites/acl/acl_test.py +index 5ca86523c..4c3214650 100644 +--- a/dirsrvtests/tests/suites/acl/acl_test.py ++++ b/dirsrvtests/tests/suites/acl/acl_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2016 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). 
+@@ -14,9 +14,8 @@ from lib389.schema import Schema + from lib389.idm.domain import Domain + from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES + from lib389.idm.organizationalrole import OrganizationalRole, OrganizationalRoles +- + from lib389.topologies import topology_m2 +-from lib389._constants import SUFFIX, DN_SCHEMA, DN_DM, DEFAULT_SUFFIX, PASSWORD ++from lib389._constants import SUFFIX, DN_DM, DEFAULT_SUFFIX, PASSWORD + + pytestmark = pytest.mark.tier1 + +@@ -243,6 +242,14 @@ def moddn_setup(topology_m2): + 'userpassword': BIND_PW}) + user.create(properties=user_props, basedn=SUFFIX) + ++ # Add anonymous read aci ++ ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"*\")" % (SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ++ ACI_SUBJECT = " userdn = \"ldap:///anyone\";)" ++ ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ suffix = Domain(m1, SUFFIX) ++ suffix.add('aci', ACI_BODY) ++ + # DIT for staging + m1.log.info("Add {}".format(STAGING_DN)) + o_roles.create(properties={'cn': STAGING_CN, 'description': "staging DIT"}) +@@ -411,7 +418,8 @@ def test_moddn_staging_prod(topology_m2, moddn_setup, + + + def test_moddn_staging_prod_9(topology_m2, moddn_setup): +- """ ++ """Test with nsslapd-moddn-aci set to off so that MODDN requires an 'add' aci. ++ + :id: 222dd7e8-7ff1-40b8-ad26-6f8e42fbfcd9 + :setup: MMR with two masters, + M1 - staging DIT +@@ -1061,10 +1069,12 @@ def test_mode_legacy_ger_with_moddn(topology_m2, moddn_setup): + @pytest.fixture(scope="module") + def rdn_write_setup(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######## Add entry tuser ########\n") +- topology_m2.ms["master1"].add_s(Entry((SRC_ENTRY_DN, { +- 'objectclass': "top person".split(), +- 'sn': SRC_ENTRY_CN, +- 'cn': SRC_ENTRY_CN}))) ++ user = UserAccount(topology_m2.ms["master1"], SRC_ENTRY_DN) ++ user_props = TEST_USER_PROPERTIES.copy() ++ user_props.update({'sn': SRC_ENTRY_CN, ++ 'cn': SRC_ENTRY_CN, ++ 'userpassword': BIND_PW}) ++ user.create(properties=user_props, basedn=SUFFIX) + + + def test_rdn_write_get_ger(topology_m2, rdn_write_setup): +diff --git a/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py +index 5700abfba..9c7226b42 100644 +--- a/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py ++++ b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py +@@ -21,7 +21,7 @@ pytestmark = pytest.mark.tier1 + USER_PASSWORD = "some test password" + NEW_USER_PASSWORD = "some new password" + +-@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") ++@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") + def test_acl_default_allow_self_write_nsuser(topology): + """ + Testing nsusers can self write and self read. This it a sanity test +@@ -80,7 +80,7 @@ def test_acl_default_allow_self_write_nsuser(topology): + self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) + + +-@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") ++@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") + def test_acl_default_allow_self_write_user(topology): + """ + Testing users can self write and self read. 
This it a sanity test +diff --git a/dirsrvtests/tests/suites/acl/deladd_test.py b/dirsrvtests/tests/suites/acl/deladd_test.py +index 45a66be94..afdc772d1 100644 +--- a/dirsrvtests/tests/suites/acl/deladd_test.py ++++ b/dirsrvtests/tests/suites/acl/deladd_test.py +@@ -86,8 +86,8 @@ def _add_user(request, topo): + + def test_allow_delete_access_to_groupdn(topo, _add_user, _aci_of_user): + +- """ +- Test allow delete access to groupdn ++ """Test allow delete access to groupdn ++ + :id: 7cf15992-68ad-11e8-85af-54e1ad30572c + :setup: topo.standalone + :steps: +@@ -124,8 +124,8 @@ def test_allow_delete_access_to_groupdn(topo, _add_user, _aci_of_user): + + def test_allow_add_access_to_anyone(topo, _add_user, _aci_of_user): + +- """ +- Test to allow add access to anyone ++ """Test to allow add access to anyone ++ + :id: 5ca31cc4-68e0-11e8-8666-8c16451d917b + :setup: topo.standalone + :steps: +@@ -160,8 +160,8 @@ def test_allow_add_access_to_anyone(topo, _add_user, _aci_of_user): + + def test_allow_delete_access_to_anyone(topo, _add_user, _aci_of_user): + +- """ +- Test to allow delete access to anyone ++ """Test to allow delete access to anyone ++ + :id: f5447c7e-68e1-11e8-84c4-8c16451d917b + :setup: server + :steps: +@@ -191,8 +191,8 @@ def test_allow_delete_access_to_anyone(topo, _add_user, _aci_of_user): + + def test_allow_delete_access_not_to_userdn(topo, _add_user, _aci_of_user): + +- """ +- Test to Allow delete access to != userdn ++ """Test to Allow delete access to != userdn ++ + :id: 00637f6e-68e3-11e8-92a3-8c16451d917b + :setup: server + :steps: +@@ -224,8 +224,8 @@ def test_allow_delete_access_not_to_userdn(topo, _add_user, _aci_of_user): + + def test_allow_delete_access_not_to_group(topo, _add_user, _aci_of_user): + +- """ +- Test to Allow delete access to != groupdn ++ """Test to Allow delete access to != groupdn ++ + :id: f58fc8b0-68e5-11e8-9313-8c16451d917b + :setup: server + :steps: +@@ -263,8 +263,8 @@ def test_allow_delete_access_not_to_group(topo, _add_user, _aci_of_user): + + def test_allow_add_access_to_parent(topo, _add_user, _aci_of_user): + +- """ +- Test to Allow add privilege to parent ++ """Test to Allow add privilege to parent ++ + :id: 9f099845-9dbc-412f-bdb9-19a5ea729694 + :setup: server + :steps: +@@ -299,8 +299,8 @@ def test_allow_add_access_to_parent(topo, _add_user, _aci_of_user): + + def test_allow_delete_access_to_parent(topo, _add_user, _aci_of_user): + +- """ +- Test to Allow delete access to parent ++ """Test to Allow delete access to parent ++ + :id: 2dd7f624-68e7-11e8-8591-8c16451d917b + :setup: server + :steps: +@@ -333,10 +333,10 @@ def test_allow_delete_access_to_parent(topo, _add_user, _aci_of_user): + new_user.delete() + + +-def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user): ++def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user, request): ++ ++ """Test to Allow delete access to dynamic group + +- """ +- Test to Allow delete access to dynamic group + :id: 14ffa452-68ed-11e8-a60d-8c16451d917b + :setup: server + :steps: +@@ -361,8 +361,8 @@ def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user): + + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ +- add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' +- f'(version 3.0; acl "$tet_thistest"; ' ++ add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' ++ f'(version 3.0; acl "{request.node.name}"; ' + f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD +@@ 
-372,10 +372,10 @@ def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user): + UserAccount(conn, USER_DELADD).delete() + + +-def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user): ++def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user, request): ++ ++ """Test to Allow delete access to dynamic group + +- """ +- Test to Allow delete access to dynamic group + :id: 010a4f20-752a-4173-b763-f520c7a85b82 + :setup: server + :steps: +@@ -401,7 +401,7 @@ def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user) + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' +- f'(targetattr=uid)(version 3.0; acl "$tet_thistest"; ' ++ f'(targetattr="uid")(version 3.0; acl "{request.node.name}"; ' + f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD +@@ -411,10 +411,10 @@ def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user) + UserAccount(conn, USER_DELADD).delete() + + +-def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user): ++def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user, request): ++ ++ """Test to Allow delete access to != dynamic group + +- """ +- Test to Allow delete access to != dynamic group + :id: 9ecb139d-bca8-428e-9044-fd89db5a3d14 + :setup: server + :steps: +@@ -439,7 +439,7 @@ def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user) + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' +- f'(targetattr=*)(version 3.0; acl "$tet_thistest"; ' ++ f'(targetattr="*")(version 3.0; acl "{request.node.name}"; ' + f'allow (delete) (groupdn != "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD +diff --git a/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py +index ca9456935..0cecde4b8 100644 +--- a/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py ++++ b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2016 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -31,15 +31,13 @@ def env_setup(topology_st): + + log.info("Add a container: %s" % CONTAINER_1) + topology_st.standalone.add_s(Entry((CONTAINER_1, +- {'objectclass': 'top', +- 'objectclass': 'organizationalunit', ++ {'objectclass': ['top','organizationalunit'], + 'ou': CONTAINER_1_OU, + }))) + + log.info("Add a container: %s" % CONTAINER_2) + topology_st.standalone.add_s(Entry((CONTAINER_2, +- {'objectclass': 'top', +- 'objectclass': 'organizationalunit', ++ {'objectclass': ['top', 'organizationalunit'], + 'ou': CONTAINER_2_OU, + }))) + +@@ -75,13 +73,13 @@ def test_enhanced_aci_modrnd(topology_st, env_setup): + :id: 492cf2a9-2efe-4e3b-955e-85eca61d66b9 + :setup: Standalone instance + :steps: +- 1. Create two containers +- 2. Create a user within "ou=test_ou_1,dc=example,dc=com" +- 3. Add an aci with a rule "cn=test_user is allowed all" within these containers +- 4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to +- the "ou=test_ou_2,dc=example,dc=com" +- 5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com) +- 6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com) ++ 1. 
Create two containers ++ 2. Create a user within "ou=test_ou_1,dc=example,dc=com" ++ 3. Add an aci with a rule "cn=test_user is allowed all" within these containers ++ 4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to ++ the "ou=test_ou_2,dc=example,dc=com" ++ 5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com) ++ 6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com) + + :expectedresults: + 1. Two containers should be created +diff --git a/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py +index b10fb1b65..7474f61f0 100644 +--- a/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py ++++ b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -70,6 +70,14 @@ def test_user(request, topo): + 'userPassword': PW_DM + }) + ++ # Add anonymous access aci ++ ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ++ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ++ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ++ suffix.add('aci', ANON_ACI) ++ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup') + for demo1 in ['c1', 'CHILD1_GLOBAL']: + uas.create(properties={ +@@ -112,7 +120,7 @@ def test_undefined_in_group_eval_five(topo, test_user, aci_of_user): + 5. Operation should succeed + """ + +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPF_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPF_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) + # This aci should NOT allow access + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) +@@ -140,7 +148,7 @@ def test_undefined_in_group_eval_six(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, ALLGROUPS_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) +@@ -168,7 +176,7 @@ def test_undefined_in_group_eval_seven(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. 
Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPH_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPH_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) +@@ -196,7 +204,7 @@ def test_undefined_in_group_eval_eight(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{} || ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, GROUPA_GLOBAL, ALLGROUPS_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{} || ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, GROUPA_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) +@@ -224,7 +232,7 @@ def test_undefined_in_group_eval_nine(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{} || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPH_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{} || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPH_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) +@@ -252,7 +260,7 @@ def test_undefined_in_group_eval_ten(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "description#GROUPDN";)') ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "description#GROUPDN";)') + user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) +@@ -281,7 +289,7 @@ def test_undefined_in_group_eval_eleven(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not( userattr = "description#GROUPDN");)') ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not( userattr = "description#GROUPDN");)') + user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) +@@ -312,7 +320,7 @@ def test_undefined_in_group_eval_twelve(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. 
Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) +@@ -341,7 +349,7 @@ def test_undefined_in_group_eval_fourteen(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) +@@ -372,7 +380,7 @@ def test_undefined_in_group_eval_fifteen(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#USERDN";)') ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#USERDN";)') + UserAccount(topo.standalone, NESTEDGROUP_OU_GLOBAL).add("description", DEEPUSER_GLOBAL) + # Here do the same tests for userattr with the parent keyword. + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) +@@ -399,7 +407,7 @@ def test_undefined_in_group_eval_sixteen(topo, test_user, aci_of_user): + 5. Operation should succeed + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) +- domain.add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not ( userattr = "parent[0,1].description#USERDN");)') ++ domain.add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not ( userattr = "parent[0,1].description#USERDN");)') + domain.add("description", DEEPUSER_GLOBAL) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test with parent keyword with not key +@@ -427,7 +435,7 @@ def test_undefined_in_group_eval_seventeen(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + # Test with the parent keyord + user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) +@@ -455,7 +463,7 @@ def test_undefined_in_group_eval_eighteen(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. 
Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not (userattr = "parent[0,1].description#GROUPDN" );)') ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not (userattr = "parent[0,1].description#GROUPDN" );)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + # Test with parent keyword with not key + user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) +diff --git a/dirsrvtests/tests/suites/acl/globalgroup_test.py b/dirsrvtests/tests/suites/acl/globalgroup_test.py +index 58c4392e5..dc51a8170 100644 +--- a/dirsrvtests/tests/suites/acl/globalgroup_test.py ++++ b/dirsrvtests/tests/suites/acl/globalgroup_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -72,6 +72,14 @@ def test_user(request, topo): + 'userPassword': PW_DM + }) + ++ # Add anonymous access aci ++ ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ++ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ++ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ++ suffix.add('aci', ANON_ACI) ++ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup') + for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', 'DEEPUSER1_GLOBAL', + 'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']: +@@ -361,7 +369,7 @@ def test_undefined_in_group_eval_two(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + # This aci should allow access +@@ -389,7 +397,7 @@ def test_undefined_in_group_eval_three(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(GROUPG_GLOBAL, ALLGROUPS_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(GROUPG_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + user = Domain(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + # test UNDEFINED in group +@@ -417,7 +425,7 @@ def test_undefined_in_group_eval_four(topo, test_user, aci_of_user): + 4. Operation should succeed + 5. 
Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER1_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) +diff --git a/dirsrvtests/tests/suites/acl/keywords_part2_test.py b/dirsrvtests/tests/suites/acl/keywords_part2_test.py +index c2aa9ac53..642e65bad 100644 +--- a/dirsrvtests/tests/suites/acl/keywords_part2_test.py ++++ b/dirsrvtests/tests/suites/acl/keywords_part2_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -68,7 +68,7 @@ def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): + + # Add ACI + domain = Domain(topo.standalone, DEFAULT_SUFFIX) +- domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci "IP aci"; ' ++ domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci "IP aci"; ' + f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "{ip_ip}" ;)') + + # create a new connection for the test +@@ -76,12 +76,13 @@ def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): + # Perform Operation + org = OrganizationalUnit(conn, IP_OU_KEY) + org.replace("seeAlso", "cn=1") ++ + # remove the aci +- domain.ensure_removed("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci ' ++ domain.ensure_removed("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci ' + f'"IP aci"; allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ' + f'ip = "{ip_ip}" ;)') + # Now add aci with new ip +- domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci "IP aci"; ' ++ domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")(version 3.0; aci "IP aci"; ' + f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "100.1.1.1" ;)') + + # After changing the ip user cant access data +@@ -106,10 +107,11 @@ def test_connectin_from_an_unauthorized_network(topo, add_user, aci_of_user): + """ + # Find the ip from ds logs , as we need to know the exact ip used by ds to run the instances. 
+ ip_ip = topo.standalone.ds_access_log.match('.* connection from ')[0].split()[-1] ++ + # Add ACI + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "IP aci"; ' ++ f'(targetattr="*")(version 3.0; aci "IP aci"; ' + f'allow(all) userdn = "ldap:///{NETSCAPEIP_KEY}" ' + f'and ip != "{ip_ip}" ;)') + +@@ -122,7 +124,7 @@ def test_connectin_from_an_unauthorized_network(topo, add_user, aci_of_user): + # Remove the ACI + domain.ensure_removed('aci', domain.get_attr_vals('aci')[-1]) + # Add new ACI +- domain.add('aci', f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)' ++ domain.add('aci', f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")' + f'(version 3.0; aci "IP aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "{ip_ip}" ;)') + +@@ -148,7 +150,7 @@ def test_ip_keyword_test_noip_cannot(topo, add_user, aci_of_user): + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target ="ldap:///{IP_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "IP aci"; allow(all) ' ++ f'(targetattr="*")(version 3.0; aci "IP aci"; allow(all) ' + f'userdn = "ldap:///{FULLIP_KEY}" and ip = "*" ;)') + + # Create a new connection for this test. +@@ -177,7 +179,7 @@ def test_user_can_access_the_data_at_any_time(topo, add_user, aci_of_user): + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' ++ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn ="ldap:///{FULLWORKER_KEY}" and ' + f'(timeofday >= "0000" and timeofday <= "2359") ;)') + +@@ -206,7 +208,7 @@ def test_user_can_access_the_data_only_in_the_morning(topo, add_user, aci_of_use + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' ++ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn = "ldap:///{DAYWORKER_KEY}" ' + f'and timeofday < "1200" ;)') + +@@ -239,7 +241,7 @@ def test_user_can_access_the_data_only_in_the_afternoon(topo, add_user, aci_of_u + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' ++ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn = "ldap:///{NIGHTWORKER_KEY}" ' + f'and timeofday > \'1200\' ;)') + +@@ -275,7 +277,7 @@ def test_timeofday_keyword(topo, add_user, aci_of_user): + # Add ACI + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' ++ f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn = "ldap:///{NOWORKER_KEY}" ' + f'and timeofday = \'{now_1}\' ;)') + +@@ -312,7 +314,7 @@ def test_dayofweek_keyword_test_everyday_can_access(topo, add_user, aci_of_user) + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' ++ f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' + f'allow(all) userdn = "ldap:///{EVERYDAY_KEY}" and ' + f'dayofweek = "Sun, Mon, Tue, Wed, Thu, Fri, Sat" ;)') + +@@ -342,7 +344,7 @@ def test_dayofweek_keyword_today_can_access(topo, add_user, aci_of_user): + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = 
"ldap:///{DAYOFWEEK_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' ++ f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' + f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' + f'and dayofweek = \'{today_1}\' ;)') + +@@ -371,7 +373,7 @@ def test_user_cannot_access_the_data_at_all(topo, add_user, aci_of_user): + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' ++ f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' + f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' + f'and dayofweek = "$NEW_DATE" ;)') + +diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py +index 138e3ede1..0174152e3 100644 +--- a/dirsrvtests/tests/suites/acl/keywords_test.py ++++ b/dirsrvtests/tests/suites/acl/keywords_test.py +@@ -39,11 +39,11 @@ NONE_2_KEY = "uid=NONE_2_KEY,{}".format(AUTHMETHOD_OU_KEY) + + + NONE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ +- f'(targetattr=*)(version 3.0; aci "Authmethod aci"; ' \ ++ f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \ + f'allow(all) userdn = "ldap:///{NONE_1_KEY}" and authmethod = "none" ;)' + + SIMPLE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ +- f'(targetattr=*)(version 3.0; aci "Authmethod aci"; ' \ ++ f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \ + f'allow(all) userdn = "ldap:///{SIMPLE_1_KEY}" and authmethod = "simple" ;)' + + +@@ -55,8 +55,7 @@ def _add_aci(topo, name): + + + def test_user_binds_with_a_password_and_can_access_the_data(topo, add_user, aci_of_user): +- """ +- User binds with a password and can access the data as per the ACI. ++ """User binds with a password and can access the data as per the ACI. + + :id: f6c4b6f0-7ac4-11e8-a517-8c16451d917b + :setup: Standalone Server +@@ -78,8 +77,7 @@ def test_user_binds_with_a_password_and_can_access_the_data(topo, add_user, aci_ + + + def test_user_binds_with_a_bad_password_and_cannot_access_the_data(topo, add_user, aci_of_user): +- """ +- User binds with a BAD password and cannot access the data . ++ """User binds with a BAD password and cannot access the data . + + :id: 0397744e-7ac5-11e8-bfb1-8c16451d917b + :setup: Standalone Server +@@ -98,8 +96,7 @@ def test_user_binds_with_a_bad_password_and_cannot_access_the_data(topo, add_use + + + def test_anonymous_user_cannot_access_the_data(topo, add_user, aci_of_user): +- """ +- Anonymous user cannot access the data ++ """Anonymous user cannot access the data + + :id: 0821a55c-7ac5-11e8-b214-8c16451d917b + :setup: Standalone Server +@@ -124,8 +121,7 @@ def test_anonymous_user_cannot_access_the_data(topo, add_user, aci_of_user): + + + def test_authenticated_but_has_no_rigth_on_the_data(topo, add_user, aci_of_user): +- """ +- User has a password. He is authenticated but has no rigth on the data. ++ """User has a password. He is authenticated but has no rigth on the data. + + :id: 11be7ebe-7ac5-11e8-b754-8c16451d917b + :setup: Standalone Server +@@ -150,10 +146,9 @@ def test_authenticated_but_has_no_rigth_on_the_data(topo, add_user, aci_of_user) + + + def test_the_bind_client_is_accessing_the_directory(topo, add_user, aci_of_user): +- """ +- The bind rule is evaluated to be true if the client is accessing the directory as per the ACI. ++ """The bind rule is evaluated to be true if the client is accessing the directory as per the ACI. 
+ +- :id: 1715bfb2-7ac5-11e8-8f2c-8c16451d917b ++ :id: 1715bfb2-7ac5-11e8-8f2c-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry +@@ -175,8 +170,7 @@ def test_the_bind_client_is_accessing_the_directory(topo, add_user, aci_of_user) + + def test_users_binds_with_a_password_and_can_access_the_data( + topo, add_user, aci_of_user): +- """ +- User binds with a password and can access the data as per the ACI. ++ """User binds with a password and can access the data as per the ACI. + + :id: 1bd01cb4-7ac5-11e8-a2f1-8c16451d917b + :setup: Standalone Server +@@ -199,8 +193,7 @@ def test_users_binds_with_a_password_and_can_access_the_data( + + + def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_user, aci_of_user): +- """ +- User binds without any password and cannot access the data ++ """User binds without any password and cannot access the data + + :id: 205777fa-7ac5-11e8-ba2f-8c16451d917b + :setup: Standalone Server +@@ -227,8 +220,7 @@ def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_us + def test_user_can_access_the_data_when_connecting_from_any_machine( + topo, add_user, aci_of_user + ): +- """ +- User can access the data when connecting from any machine as per the ACI. ++ """User can access the data when connecting from any machine as per the ACI. + + :id: 28cbc008-7ac5-11e8-934e-8c16451d917b + :setup: Standalone Server +@@ -244,7 +236,7 @@ def test_user_can_access_the_data_when_connecting_from_any_machine( + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .add("aci", f'(target ="ldap:///{DNS_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' ++ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{FULLDNS_KEY}" and dns = "*" ;)') + + # Create a new connection for this test. +@@ -256,8 +248,8 @@ def test_user_can_access_the_data_when_connecting_from_any_machine( + def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + topo, add_user, aci_of_user + ): +- """ +- User can access the data when connecting from internal ICNC network only as per the ACI. ++ """User can access the data when connecting from internal ICNC network only as per the ACI. ++ + :id: 2cac2136-7ac5-11e8-8328-8c16451d917b + :setup: Standalone Server + :steps: +@@ -273,9 +265,9 @@ def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", [f'(target = "ldap:///{DNS_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "DNS aci"; ' ++ f'(targetattr="*")(version 3.0; aci "DNS aci"; ' + f'allow(all) userdn = "ldap:///{SUNDNS_KEY}" and dns = "*redhat.com" ;)', +- f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' ++ f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{SUNDNS_KEY}" and dns = "{dns_name}" ;)']) + +@@ -288,8 +280,7 @@ def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + def test_user_can_access_the_data_when_connecting_from_some_network_only( + topo, add_user, aci_of_user + ): +- """ +- User can access the data when connecting from some network only as per the ACI. ++ """User can access the data when connecting from some network only as per the ACI. 
+ + :id: 3098512a-7ac5-11e8-af85-8c16451d917b + :setup: Standalone Server +@@ -306,7 +297,7 @@ def test_user_can_access_the_data_when_connecting_from_some_network_only( + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' ++ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' + f'and dns = "{dns_name}" ;)') + +@@ -317,8 +308,7 @@ def test_user_can_access_the_data_when_connecting_from_some_network_only( + + + def test_from_an_unauthorized_network(topo, add_user, aci_of_user): +- """ +- User cannot access the data when connecting from an unauthorized network as per the ACI. ++ """User cannot access the data when connecting from an unauthorized network as per the ACI. + + :id: 34cf9726-7ac5-11e8-bc12-8c16451d917b + :setup: Standalone Server +@@ -334,7 +324,7 @@ def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' ++ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" and dns != "red.iplanet.com" ;)') + + # Create a new connection for this test. +@@ -345,8 +335,7 @@ def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + + def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2( + topo, add_user, aci_of_user): +- """ +- User cannot access the data when connecting from an unauthorized network as per the ACI. ++ """User cannot access the data when connecting from an unauthorized network as per the ACI. + + :id: 396bdd44-7ac5-11e8-8014-8c16451d917b + :setup: Standalone Server +@@ -362,7 +351,7 @@ def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_networ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' +- f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' ++ f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' + f'and dnsalias != "www.redhat.com" ;)') + +@@ -373,8 +362,8 @@ def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_networ + + + def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user, aci_of_user): +- """ +- User cannot access the data if not from a certain domain as per the ACI. ++ """User cannot access the data if not from a certain domain as per the ACI. ++ + :id: 3d658972-7ac5-11e8-930f-8c16451d917b + :setup: Standalone Server + :steps: +@@ -388,7 +377,7 @@ def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ +- add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' ++ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NODNS_KEY}" ' + f'and dns = "RAP.rock.SALSA.house.COM" ;)') +@@ -402,8 +391,7 @@ def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user + + + def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): +- """ +- Dnsalias Keyword NODNS_KEY cannot assess data as per the ACI. ++ """Dnsalias Keyword NODNS_KEY cannot assess data as per the ACI. 
+ + :id: 41b467be-7ac5-11e8-89a3-8c16451d917b + :setup: Standalone Server +@@ -418,7 +406,7 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ +- add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' ++ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NODNS_KEY}" and ' + f'dnsalias = "RAP.rock.SALSA.house.COM" ;)') +@@ -434,8 +422,7 @@ def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): + @pytest.mark.bz1710848 + @pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"]) + def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, ip_addr): +- """ +- User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses ++ """User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses + + :id: 461e761e-7ac5-11e8-9ae4-8c16451d917b + :parametrized: yes +@@ -451,7 +438,7 @@ def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, + """ + # Add ACI that contains both IPv4 and IPv6 + Domain(topo.standalone, DEFAULT_SUFFIX).\ +- add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr=*) ' ++ add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr="*") ' + f'(version 3.0; aci "IP aci"; allow(all) ' + f'userdn = "ldap:///{FULLIP_KEY}" and (ip = "127.0.0.1" or ip = "::1");)') + +diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py +index 8f122b7a7..5f0e3eb72 100644 +--- a/dirsrvtests/tests/suites/acl/misc_test.py ++++ b/dirsrvtests/tests/suites/acl/misc_test.py +@@ -1,6 +1,6 @@ + """ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 RED Hat, Inc. ++# Copyright (C) 2020 RED Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). 
+@@ -8,6 +8,7 @@ + # --- END COPYRIGHT BLOCK ---- + """ + ++import ldap + import os + import pytest + +@@ -21,8 +22,6 @@ from lib389.topologies import topology_st as topo + from lib389.idm.domain import Domain + from lib389.plugins import ACLPlugin + +-import ldap +- + pytestmark = pytest.mark.tier1 + + PEOPLE = "ou=PEOPLE,{}".format(DEFAULT_SUFFIX) +@@ -37,7 +36,19 @@ def aci_of_user(request, topo): + :param request: + :param topo: + """ +- aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') ++ ++ # Add anonymous access aci ++ ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ++ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ++ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ++ try: ++ suffix.add('aci', ANON_ACI) ++ except ldap.TYPE_OR_VALUE_EXISTS: ++ pass ++ ++ aci_list = suffix.get_attr_vals('aci') + + def finofaci(): + """ +@@ -78,8 +89,8 @@ def clean(request, topo): + + + def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): +- """ +- Misc Test 2 accept aci in addition to acl ++ """Misc Test 2 accept aci in addition to acl ++ + :id: 8e9408fa-7db8-11e8-adaa-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -96,7 +107,7 @@ def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): + for i in [('mail', 'anujborah@okok.com'), ('givenname', 'Anuj'), ('userPassword', PW_DM)]: + user.set(i[0], i[1]) + +- aci_target = "(targetattr=givenname)" ++ aci_target = '(targetattr="givenname")' + aci_allow = ('(version 3.0; acl "Name of the ACI"; deny (read, search, compare, write)') + aci_subject = 'userdn="ldap:///anyone";)' + Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_target + aci_allow + aci_subject) +@@ -115,9 +126,9 @@ def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): + + @pytest.mark.bz334451 + def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): +- """ +- bug 334451 : more then 40 acl will crash slapd ++ """bug 334451 : more then 40 acl will crash slapd + superseded by Bug 772778 - acl cache overflown problem with > 200 acis ++ + :id: 93a44c60-7db8-11e8-9439-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -132,7 +143,7 @@ def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + user = uas.create_test_user() + +- aci_target = '(target ="ldap:///{}")(targetattr !="userPassword")'.format(CONTAINER_1_DELADD) ++ aci_target = '(target ="ldap:///{}")(targetattr!="userPassword")'.format(CONTAINER_1_DELADD) + # more_then_40_acl_will not crash_slapd + for i in range(40): + aci_allow = '(version 3.0;acl "ACI_{}";allow (read, search, compare)'.format(i) +@@ -147,9 +158,9 @@ def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): + + @pytest.mark.bz345643 + def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): +- """ +- bug 345643 ++ """bug 345643 + Misc Test 4 search access should not include read access ++ + :id: 98ab173e-7db8-11e8-a309-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -163,7 +174,7 @@ def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): + """ + assert Domain(topo.standalone, DEFAULT_SUFFIX).present('aci') + Domain(topo.standalone, DEFAULT_SUFFIX)\ +- .add("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr !="userPassword")' ++ 
.replace("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr != "userPassword")' + '(version 3.0;acl "anonymous access";allow (search)' + '(userdn = "ldap:///anyone");)', + f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' +@@ -176,13 +187,13 @@ def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): + conn = Anonymous(topo.standalone).bind() + # search_access_should_not_include_read_access + suffix = Domain(conn, DEFAULT_SUFFIX) +- with pytest.raises(AssertionError): ++ with pytest.raises(Exception): + assert suffix.present('aci') + + + def test_only_allow_some_targetattr(topo, clean, aci_of_user): +- """ +- Misc Test 5 only allow some targetattr (1/2) ++ """Misc Test 5 only allow some targetattr (1/2) ++ + :id: 9d27f048-7db8-11e8-a71c-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -211,17 +222,17 @@ def test_only_allow_some_targetattr(topo, clean, aci_of_user): + # aci will allow only mail targetattr + assert len(accounts.filter('(mail=*)')) == 2 + # aci will allow only mail targetattr +- assert not accounts.filter('(cn=*)') ++ assert not accounts.filter('(cn=*)', scope=1) + # with root no , blockage +- assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) == 2 ++ assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)', scope=1)) == 2 + + for i in uas.list(): + i.delete() + + +-def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): +- """ +- Misc Test 6 only allow some targetattr (2/2)" ++def test_only_allow_some_targetattr_two(topo, clean, aci_of_user, request): ++ """Misc Test 6 only allow some targetattr (2/2)" ++ + :id: a188239c-7db8-11e8-903e-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -244,15 +255,15 @@ def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): + + Domain(topo.standalone, DEFAULT_SUFFIX).\ + replace("aci", '(target="ldap:///{}") (targetattr="mail||objectClass")' +- '(targetfilter="cn=Anuj") (version 3.0; acl "$tet_thistest"; ' ++ '(targetfilter="cn=Anuj") (version 3.0; acl "{}"; ' + 'allow (compare,read,search) ' +- '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) ++ '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX, request.node.name)) + + conn = UserAccount(topo.standalone, user.dn).bind(PW_DM) + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) +- assert len(account.filter('(mail=*)')) == 5 +- assert not account.filter('(cn=*)') ++ assert len(account.filter('(mail=*)', scope=1)) == 5 ++ assert not account.filter('(cn=*)', scope=1) + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' +@@ -261,8 +272,8 @@ def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): + conn = Anonymous(topo.standalone).bind() + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) +- assert len(account.filter('(mail=*)')) == 5 +- assert not account.filter('(cn=*)') ++ assert len(account.filter('(mail=*)', scope=1)) == 5 ++ assert not account.filter('(cn=*)', scope=1) + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' +@@ -274,11 +285,10 @@ def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): + i.delete() + + +- + @pytest.mark.bz326000 + def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): +- """ +- Non-regression test for BUG 326000: MemberURL needs to be normalized ++ """Non-regression test for BUG 
326000: MemberURL needs to be normalized ++ + :id: a5d172e6-7db8-11e8-aca7-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -291,7 +301,7 @@ def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): + 3. Operation should succeed + """ + ou_ou = OrganizationalUnit(topo.standalone, "ou=PEOPLE,{}".format(DEFAULT_SUFFIX)) +- ou_ou.set('aci', '(targetattr= *)' ++ ou_ou.set('aci', '(targetattr="*")' + '(version 3.0; acl "tester"; allow(all) ' + 'groupdn = "ldap:///cn =DYNGROUP,ou=PEOPLE, {}";)'.format(DEFAULT_SUFFIX)) + +@@ -323,8 +333,8 @@ def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): + + @pytest.mark.bz624370 + def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): +- """ +- Misc 10, check that greater than 200 ACLs can be created. Bug 624370 ++ """Misc 10, check that greater than 200 ACLs can be created. Bug 624370 ++ + :id: ac020252-7db8-11e8-8652-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -355,8 +365,8 @@ def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): + + @pytest.mark.bz624453 + def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci_of_user): +- """ +- Make sure the server bahaves properly with very long attribute names. Bug 624453. ++ """Make sure the server bahaves properly with very long attribute names. Bug 624453. ++ + :id: b0d31942-7db8-11e8-a833-8c16451d917b + :setup: Standalone Instance + :steps: +@@ -378,24 +388,23 @@ def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci + + + def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): +- """ +- Do bind as 201 distinct users +- Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config +- Restart the server +- Do bind as 201 distinct users ++ """Test bind as 201 distinct users ++ + :id: c0060532-7db8-11e8-a124-8c16451d917b + :setup: Standalone Instance + :steps: +- 1. Add test entry +- 2. Add ACI +- 3. User should follow ACI role ++ 1. Add test entries ++ 2. Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config ++ 3. Restart the server ++ 4. Do bind as 201 distinct users + :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed ++ 1. Entries should be added ++ 2. Operation should succeed ++ 3. Operation should succeed ++ 4. Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) +- for i in range(50): ++ for i in range(201): + user = uas.create_test_user(uid=i, gid=i) + user.set('userPassword', PW_DM) + +@@ -408,7 +417,6 @@ def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): + for i in range(len(uas.list())): + uas.list()[i].bind(PW_DM) + +- + if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) +diff --git a/dirsrvtests/tests/suites/acl/modrdn_test.py b/dirsrvtests/tests/suites/acl/modrdn_test.py +index f67f3e508..c4ae8eea5 100644 +--- a/dirsrvtests/tests/suites/acl/modrdn_test.py ++++ b/dirsrvtests/tests/suites/acl/modrdn_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). 
+@@ -87,9 +87,9 @@ def _add_user(request, topo): + request.addfinalizer(fin) + + +-def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user): +- """ +- Modrdn Test 1 Allow write privilege to anyone ++def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user, request): ++ """Modrdn Test 1 Allow write privilege to anyone ++ + :id: 4406f12e-7932-11e8-9dea-8c16451d917b + :setup: server + :steps: +@@ -102,8 +102,8 @@ def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user): + 3. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", +- '(target ="ldap:///{}")(targetattr=*)(version 3.0;acl "$tet_thistest";allow ' +- '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) ++ '(target ="ldap:///{}")(targetattr="*")(version 3.0;acl "{}";allow ' ++ '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) + conn = Anonymous(topo.standalone).bind() + # Allow write privilege to anyone + useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) +@@ -115,22 +115,22 @@ def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user): + + + def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_url( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): ++ """Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL ++ ++ :id: 4c0f8c00-7932-11e8-8398-8c16451d917b ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. User should follow ACI role ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should succeed + """ +- Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL +- :id: 4c0f8c00-7932-11e8-8398-8c16451d917b +- :setup: server +- :steps: +- 1. Add test entry +- 2. Add ACI +- 3. User should follow ACI role +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed +- """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr=*)(version 3.0; acl "$tet_thistest"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, DYNAMIC_MODRDN)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr="*")(version 3.0; acl "{}"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, request.node.name, DYNAMIC_MODRDN)) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL + useraccount = UserAccount(conn, USER_DELADD) +@@ -141,22 +141,22 @@ def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_u + assert 'cn=Jeff Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn + + +-def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user): +- """ +- Test for write access to naming atributes (1) +- Test that check for add writes to the new naming attr +- :id: 532fc630-7932-11e8-8924-8c16451d917b +- :setup: server +- :steps: +- 1. Add test entry +- 2. Add ACI +- 3. User should follow ACI role +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. 
Operation should succeed ++def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user, request): ++ """Test for write access to naming atributes ++ Test that check for add writes to the new naming attr ++ ++ :id: 532fc630-7932-11e8-8924-8c16451d917b ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. User should follow ACI role ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + #Test for write access to naming atributes + useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) +@@ -164,23 +164,23 @@ def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user): + useraccount.rename("uid=Jeffbo Vedder") + + +-def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user): +- """ +- Test for write access to naming atributes (2) +- :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b +- :setup: server +- :steps: +- 1. Add test entry +- 2. Add ACI +- 3. User should follow ACI role +- 4. Now try to modrdn it to cn, won't work if request deleteoldrdn. +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed +- 4. Operation should not succeed ++def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user, request): ++ """Test for write access to naming atributes (2) ++ ++ :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. User should follow ACI role ++ 4. Now try to modrdn it to cn, won't work if request deleteoldrdn. ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should succeed ++ 4. Operation should not succeed + """ +- Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) ++ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) + properties = { + 'uid': 'Sam Carter1', + 'cn': 'Sam Carter1', +@@ -202,22 +202,22 @@ def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user): + + @pytest.mark.bz950351 + def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user): +- """ +- Testing bug #950351: RHDS denies MODRDN access if ACI list contains any DENY rule +- Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour +- as you cannot rename the entry anymore +- :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b +- :setup: server +- :steps: +- 1. Add test entry +- 2. Adding a new ou ou=People to $BASEDN +- 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN +- 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed +- 4. 
Operation should succeed ++ """RHDS denies MODRDN access if ACI list contains any DENY rule ++ Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour ++ as you cannot rename the entry anymore ++ ++ :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Adding a new ou ou=People to $BASEDN ++ 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN ++ 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should succeed ++ 4. Operation should succeed + """ + properties = { + 'uid': 'NEWENTRY9_MODRDN', +@@ -245,28 +245,28 @@ def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user): + + + def test_renaming_target_entry(topo, _add_user, aci_of_user): +- """ +- Test for renaming target entry +- :id: 6be1d33a-7932-11e8-9115-8c16451d917b +- :setup: server +- :steps: +- 1. Add test entry +- 2. Create a test user entry +- 3.Create a new ou entry with an aci +- 4. Make sure uid=$MYUID has the access +- 5. Rename ou=OU0 to ou=OU1 +- 6. Create another ou=OU2 +- 7. Move ou=OU1 under ou=OU2 +- 8. Make sure uid=$MYUID still has the access +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed +- 4. Operation should succeed +- 5. Operation should succeed +- 6. Operation should succeed +- 7. Operation should succeed +- 8. Operation should succeed ++ """Test for renaming target entry ++ ++ :id: 6be1d33a-7932-11e8-9115-8c16451d917b ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Create a test user entry ++ 3. Create a new ou entry with an aci ++ 4. Make sure uid=$MYUID has the access ++ 5. Rename ou=OU0 to ou=OU1 ++ 6. Create another ou=OU2 ++ 7. Move ou=OU1 under ou=OU2 ++ 8. Make sure uid=$MYUID still has the access ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should succeed ++ 4. Operation should succeed ++ 5. Operation should succeed ++ 6. Operation should succeed ++ 7. Operation should succeed ++ 8. 
Operation should succeed + """ + properties = { + 'uid': 'TRAC340_MODRDN', +@@ -281,7 +281,7 @@ def test_renaming_target_entry(topo, _add_user, aci_of_user): + user.set("userPassword", "password") + ou = OrganizationalUnit(topo.standalone, 'ou=OU0,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'OU0'}) +- ou.set('aci', '(targetattr=*)(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN)) ++ ou.set('aci', '(targetattr="*")(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN)) + conn = UserAccount(topo.standalone, TRAC340_MODRDN).bind(PW_DM) + assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU0') + # Test for renaming target entry +diff --git a/dirsrvtests/tests/suites/acl/roledn_test.py b/dirsrvtests/tests/suites/acl/roledn_test.py +index 227ebd95f..6ccd652cf 100644 +--- a/dirsrvtests/tests/suites/acl/roledn_test.py ++++ b/dirsrvtests/tests/suites/acl/roledn_test.py +@@ -78,10 +78,10 @@ def _add_user(request, topo): + f'(target="ldap:///{OR_RULE_ACCESS}")(targetattr="*")' + f'(version 3.0; aci "or role aci"; allow(all) ' + f'roledn = "ldap:///{ROLE1} || ldap:///{ROLE21}";)', +- f'(target="ldap:///{ALL_ACCESS}")(targetattr=*)' ++ f'(target="ldap:///{ALL_ACCESS}")(targetattr="*")' + f'(version 3.0; aci "anyone role aci"; allow(all) ' + f'roledn = "ldap:///anyone";)', +- f'(target="ldap:///{NOT_RULE_ACCESS}")(targetattr=*)' ++ f'(target="ldap:///{NOT_RULE_ACCESS}")(targetattr="*")' + f'(version 3.0; aci "not role aci"; allow(all)' + f'roledn != "ldap:///{ROLE1} || ldap:///{ROLE21}";)']) + +diff --git a/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py +index af7501338..dd506a786 100644 +--- a/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py ++++ b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2016 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -90,8 +90,8 @@ def test_selfdn_permission_add(topology_st, allow_user_init): + + :id: e837a9ef-be92-48da-ad8b-ebf42b0fede1 + :setup: Standalone instance, add a entry which is used to bind, +- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', +- remove aci's to start with a clean slate, and add dummy entries ++ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', ++ remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not ADD an entry without the proper SELFDN aci + 2. Check with the proper ACI we can not ADD with 'member' attribute +@@ -191,8 +191,8 @@ def test_selfdn_permission_search(topology_st, allow_user_init): + + :id: 06d51ef9-c675-4583-99b2-4852dbda190e + :setup: Standalone instance, add a entry which is used to bind, +- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', +- remove aci's to start with a clean slate, and add dummy entries ++ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', ++ remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not search an entry without the proper SELFDN aci + 2. 
Add proper ACI +@@ -217,7 +217,7 @@ def test_selfdn_permission_search(topology_st, allow_user_init): + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX +- ACI_TARGETATTR = "(targetattr = *)" ++ ACI_TARGETATTR = '(targetattr="*")' + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" +@@ -241,8 +241,8 @@ def test_selfdn_permission_modify(topology_st, allow_user_init): + + :id: 97a58844-095f-44b0-9029-dd29a7d83d68 + :setup: Standalone instance, add a entry which is used to bind, +- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', +- remove aci's to start with a clean slate, and add dummy entries ++ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', ++ remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not modify an entry without the proper SELFDN aci + 2. Add proper ACI +@@ -272,7 +272,7 @@ def test_selfdn_permission_modify(topology_st, allow_user_init): + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX +- ACI_TARGETATTR = "(targetattr = *)" ++ ACI_TARGETATTR = '(targetattr="*")' + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" +@@ -300,8 +300,8 @@ def test_selfdn_permission_delete(topology_st, allow_user_init): + + :id: 0ec4c0ec-e7b0-4ef1-8373-ab25aae34516 + :setup: Standalone instance, add a entry which is used to bind, +- enable acl error logging by setting 'nsslapd-errorlog-level' to '128', +- remove aci's to start with a clean slate, and add dummy entries ++ enable acl error logging by setting 'nsslapd-errorlog-level' to '128', ++ remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not delete an entry without the proper SELFDN aci + 2. Add proper ACI +@@ -309,6 +309,7 @@ def test_selfdn_permission_delete(topology_st, allow_user_init): + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful ++ 3. Operation should be successful + """ + topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") + +diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py +index c143ff7c9..b8f27480a 100644 +--- a/dirsrvtests/tests/suites/acl/syntax_test.py ++++ b/dirsrvtests/tests/suites/acl/syntax_test.py +@@ -1,12 +1,10 @@ +-""" + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). + # See LICENSE for details. 
+ # --- END COPYRIGHT BLOCK ---- +-""" + + import os + import pytest +@@ -74,66 +72,66 @@ INVALID = [('test_targattrfilters_1', + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_19', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(write)gropdn="ldap:///anyone";)'), + ('test_targattrfilters_21', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(rite)userdn="ldap:///anyone";)'), + ('test_targattrfilters_22', + f'(targt = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_23', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_mispel', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; alc "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Wrong_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 2.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Authenticate_statement', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr != "uid")' +- f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute (all)' ++ f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:///anyone";)'), + ('test_Multiple_targets', + f'(target = ldap:///ou=Product Development,{DEFAULT_SUFFIX})' +- f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Target_set_to_self', +- f'(target = ldap:///self)(targetattr=*)' ++ f'(target = ldap:///self)(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_ldap_instead_of_ldap', +- f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_more_than_three', +- f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_less_than_three', +- f'(target = 
ldap://{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target = ldap://{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_bind_rule_set_with_less_than_three', +- f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:/anyone";)'), + ('test_Use_semicolon_instead_of_comma_in_permission', +- f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny ' + f'(read; search; compare; write)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_target', +- f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_use_double_equal_instead_of_equal_in_user_and_group_access', + f'(target = ldap:///{DEFAULT_SUFFIX})' +@@ -143,21 +141,21 @@ INVALID = [('test_targattrfilters_1', + f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(version 3.0; acl Name of the ACI ; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_1', +- f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' ++ f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_2', +- f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn == "ldap:///anyone";)'), + ('test_extra_parentheses_case_3', +- f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' ++ f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn = "ldap:///anyone";)))'), + ('test_no_semicolon_at_the_end_of_the_aci', +- f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' ++ f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone")'), + ('test_a_character_different_of_a_semicolon_at_the_end_of_the_aci', +- f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' ++ f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone"%)'), + ('test_bad_filter', + f'(target = ldap:///{DEFAULT_SUFFIX}) ' +@@ -173,14 +171,14 @@ INVALID = [('test_targattrfilters_1', + + FAILED = [('test_targattrfilters_18', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdn="ldap:///{"123" * 300}";)'), + ('test_targattrfilters_20', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' +- f'(targetattr=*)' ++ f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdns="ldap:///anyone";)'), + ('test_bind_rule_set_with_more_than_three', +- f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' ++ f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:////////anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targetattr', +@@ -253,7 +251,7 @@ def 
test_target_set_above_the_entry_test(topo): + domain = Domain(topo.standalone, "ou=People,{}".format(DEFAULT_SUFFIX)) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' +- f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute ' ++ f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn="ldap:///anyone";)') + + +diff --git a/dirsrvtests/tests/suites/acl/userattr_test.py b/dirsrvtests/tests/suites/acl/userattr_test.py +index 542d7afc9..3a13d32dc 100644 +--- a/dirsrvtests/tests/suites/acl/userattr_test.py ++++ b/dirsrvtests/tests/suites/acl/userattr_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -55,7 +55,7 @@ def _add_user(topo): + """ + This function will create user for the test and in the end entries will be deleted . + """ +- role_aci_body = '(targetattr=*)(version 3.0; aci "role aci"; allow(all)' ++ role_aci_body = '(targetattr="*")(version 3.0; aci "role aci"; allow(all)' + # Creating OUs + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_accounting = ous.create(properties={'ou': 'Accounting'}) +@@ -77,7 +77,7 @@ def _add_user(topo): + 'description': LEVEL_1, + 'businessCategory': LEVEL_0}) + +- inheritance_aci_body = '(targetattr=*)(version 3.0; aci "Inheritance aci"; allow(all) ' ++ inheritance_aci_body = '(targetattr="*")(version 3.0; aci "Inheritance aci"; allow(all) ' + ou_inheritance.set('aci', [f'{inheritance_aci_body} ' + f'userattr = "parent[0].businessCategory#USERDN";)', + f'{inheritance_aci_body} ' +diff --git a/dirsrvtests/tests/suites/acl/valueacl_part2_test.py b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py +index 5f5b1c64e..763c0b5a2 100644 +--- a/dirsrvtests/tests/suites/acl/valueacl_part2_test.py ++++ b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py +@@ -28,6 +28,17 @@ HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + @pytest.fixture(scope="function") + def aci_of_user(request, topo): ++ # Add anonymous access aci ++ ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ++ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ++ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ++ try: ++ suffix.add('aci', ANON_ACI) ++ except ldap.TYPE_OR_VALUE_EXISTS: ++ pass ++ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): +@@ -107,10 +118,10 @@ def _add_user(request, topo): + request.addfinalizer(fin) + + +-def test_we_can_search_as_expected(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) ++def test_we_can_search_as_expected(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) + Test that we can search as expected ++ + :id: e845dbba-7aa9-11e8-8988-8c16451d917b + :setup: server + :steps: +@@ -124,8 +135,8 @@ def test_we_can_search_as_expected(topo, _add_user, aci_of_user): + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ + '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ +- 
'(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ +- 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) ++ '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ ++ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = Anonymous(topo.standalone).bind() + # aci will allow secretary , mail , objectclass +@@ -135,11 +146,11 @@ def test_we_can_search_as_expected(topo, _add_user, aci_of_user): + assert user.get_attr_vals('objectclass') + + +-def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the ++def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the + value of the attributes being added (or deleted)) +- "Valueacl Test $tet_thistest Test search will work with targattrfilters present." ++ Test search will work with targattrfilters present. ++ + :id: f8c1ea88-7aa9-11e8-a55c-8c16451d917b + :setup: server + :steps: +@@ -153,8 +164,8 @@ def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user): + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ + '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ +- '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ +- 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) ++ '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ ++ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will not allow 'title', 'topdog' + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -163,11 +174,11 @@ def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user): + user.add('title', 'topdog') + + +-def test_modify_with_multiple_filters(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the ++def test_modify_with_multiple_filters(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the + value of the attributes being added (or deleted)) +- "Valueacl Test $tet_thistest Allowed by multiple." 
++ Allowed by multiple filters ++ + :id: fd9d223e-7aa9-11e8-a83b-8c16451d917b + :setup: server + :steps: +@@ -181,9 +192,9 @@ def test_modify_with_multiple_filters(topo, _add_user, aci_of_user): + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ +- '(secretary=cn=Meylan,{})")(version 3.0; acl "$tet_thistest"; allow (write) ' \ ++ '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ + '(userdn = "ldap:///anyone") ;)'.format( +- DEFAULT_SUFFIX, DEFAULT_SUFFIX ++ DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name + ) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -195,11 +206,11 @@ def test_modify_with_multiple_filters(topo, _add_user, aci_of_user): + assert user.get_attr_val('secretary') + + +-def test_denied_by_multiple_filters(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_denied_by_multiple_filters(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) +- "Valueacl Test $tet_thistest Denied by multiple filters." ++ Denied by multiple filters ++ + :id: 034c6c62-7aaa-11e8-8634-8c16451d917b + :setup: server + :steps: +@@ -213,8 +224,8 @@ def test_denied_by_multiple_filters(topo, _add_user, aci_of_user): + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ +- '(secretary=cn=Meylan,{})")(version 3.0; acl "$tet_thistest"; allow (write) ' \ +- '(userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX) ++ '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ ++ '(userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow title some attribute only +@@ -228,11 +239,11 @@ def test_denied_by_multiple_filters(topo, _add_user, aci_of_user): + user.add("secretary", "cn=Grenoble,dc=example,dc=com") + + +-def test_allowed_add_one_attribute(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_allowed_add_one_attribute(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) +- "Valueacl Test $tet_thistest Allowed add one attribute (in presence of multiple filters)" ++ Allowed add one attribute (in presence of multiple filters) ++ + :id: 086c7f0c-7aaa-11e8-b69f-8c16451d917b + :setup: server + :steps: +@@ -245,9 +256,9 @@ def test_allowed_add_one_attribute(topo, _add_user, aci_of_user): + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:(secretary=cn=Meylan, {}), ' \ +- 'del=title:(title=architect) && secretary:(secretary=cn=Meylan, {})")(version 3.0; acl "$tet_thistest"; ' \ ++ 'del=title:(title=architect) && secretary:(secretary=cn=Meylan, {})")(version 3.0; acl "{}"; ' \ + 'allow (write) (userdn = "ldap:///{}") ;)'.format( +- DEFAULT_SUFFIX, DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) ++ DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) +@@ -258,12 +269,12 @@ def test_allowed_add_one_attribute(topo, _add_user, aci_of_user): + + + def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) +- "Valueacl Test $tet_thistest Test not allowed add an entry" ++ Test not allowed add an entry ++ + :id: 0d0effee-7aaa-11e8-b673-8c16451d917b + :setup: server + :steps: +@@ -277,8 +288,8 @@ def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)) ' \ + '&& secretary:(secretary=cn=Meylan, {}), del=title:(|(title=engineer)(title=cool dude)' \ +- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (add) userdn = "ldap:///{}";)'.format( +- DEFAULT_SUFFIX, DEFAULT_SUFFIX) ++ '(title=scum))")(version 3.0; aci "{}"; allow (add) userdn = "ldap:///{}";)'.format( ++ DEFAULT_SUFFIX, request.node.name, DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + properties = { + 'uid': 'FRED', +@@ -298,11 +309,11 @@ def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( + user.add("objectclass", "person") + + +-def test_on_modrdn(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_on_modrdn(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that valuacls kick in for modrdn operation. ++ + :id: 12985dde-7aaa-11e8-abde-8c16451d917b + :setup: server + :steps: +@@ -315,8 +326,8 @@ def test_on_modrdn(topo, _add_user, aci_of_user): + 3. 
Operation should succeed + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Accounting,{}")(targattrfilters = "add=cn:(|(cn=engineer)), ' \ +- 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; aci "$tet_thistest"; ' \ +- 'allow (write) userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) ++ 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; aci "{}"; ' \ ++ 'allow (write) userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # modrdn_s is not allowed with ou=OU1 +@@ -325,11 +336,11 @@ def test_on_modrdn(topo, _add_user, aci_of_user): + useraccount.rename("ou=OU1") + + +-def test_on_modrdn_allow(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the attributes being ++def test_on_modrdn_allow(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the attributes being + added (or deleted)) +- "Valueacl Test $tet_thistest Test modrdn still works (2)" ++ Test modrdn still works (2) ++ + :id: 17720562-7aaa-11e8-82ee-8c16451d917b + :setup: server + :steps: +@@ -342,8 +353,8 @@ def test_on_modrdn_allow(topo, _add_user, aci_of_user): + 3. Operation should succeed + """ + ACI_BODY = '(target="ldap:///{}")(targattrfilters = "add=cn:((cn=engineer)), del=cn:((cn=jonny))")' \ +- '(version 3.0; aci "$tet_thistest"; allow (write) ' \ +- 'userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) ++ '(version 3.0; aci "{}"; allow (write) ' \ ++ 'userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + properties = { + 'uid': 'jonny', +@@ -364,12 +375,12 @@ def test_on_modrdn_allow(topo, _add_user, aci_of_user): + + @pytest.mark.bz979515 + def test_targattrfilters_keyword(topo): +- """ +- Testing the targattrfilters keyword that allows access control based on the value ++ """Testing the targattrfilters keyword that allows access control based on the value + of the attributes being added (or deleted)) + "Bug #979515 - ACLs inoperative in some search scenarios [rhel-6.5]" + "Bug #979516 is a clone for DS8.2 on RHEL5.9" + "Bug #979514 is a clone for RHEL6.4 zStream errata" ++ + :id: 23f9e9d0-7aaa-11e8-b16b-8c16451d917b + :setup: server + :steps: +diff --git a/dirsrvtests/tests/suites/acl/valueacl_test.py b/dirsrvtests/tests/suites/acl/valueacl_test.py +index 54bc13452..3bbbdcabb 100644 +--- a/dirsrvtests/tests/suites/acl/valueacl_test.py ++++ b/dirsrvtests/tests/suites/acl/valueacl_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). 
+@@ -28,6 +28,17 @@ HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + @pytest.fixture(scope="function") + def aci_of_user(request, topo): ++ # Add anonymous access aci ++ ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ++ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ++ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ++ try: ++ suffix.add('aci', ANON_ACI) ++ except ldap.TYPE_OR_VALUE_EXISTS: ++ pass ++ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): +@@ -167,10 +178,10 @@ class _AddFREDWithRoot: + def test_delete_an_attribute_value_we_are_not_allowed_to_delete( + topo, _add_user, aci_of_user + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value ++ """Testing the targattrfilters keyword that allows access control based on the value + of the attributes being added (or deleted)) + Test that we can MODIFY:add an attribute value we are allowed to add ++ + :id: 7c41baa6-7aa9-11e8-9bdc-8c16451d917b + :setup: server + :steps: +@@ -192,12 +203,12 @@ def test_delete_an_attribute_value_we_are_not_allowed_to_delete( + + + def test_donot_allow_write_access_to_title_if_value_is_not_architect( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:add an attribute value we are not allowed to add ++ + :id: 822c607e-7aa9-11e8-b2e7-8c16451d917b + :setup: server + :steps: +@@ -210,7 +221,7 @@ def test_donot_allow_write_access_to_title_if_value_is_not_architect( + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ +- '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -221,12 +232,12 @@ def test_donot_allow_write_access_to_title_if_value_is_not_architect( + + + def test_delete_an_attribute_value_we_are_allowed_to_delete( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of ++ """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) +- Test that we can MODIFY:delete an attribute value we are allowed to delete, ++ Test that we can MODIFY:delete an attribute value we are allowed to delete ++ + :id: 86f36b34-7aa9-11e8-ab16-8c16451d917b + :setup: server + :steps: +@@ -239,7 +250,7 @@ def test_delete_an_attribute_value_we_are_allowed_to_delete( + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ +- '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + # aci will allow to delete title architect +@@ -249,12 +260,12 @@ def test_delete_an_attribute_value_we_are_allowed_to_delete( + + + def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) +- Test that we cannot MODIFY:delete an attribute value we are allowed to delete, ++ Test that we cannot MODIFY:delete an attribute value we are allowed to delete ++ + :id: 8c9f3a90-7aa9-11e8-bf2e-8c16451d917b + :setup: server + :steps: +@@ -267,7 +278,7 @@ def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ +- '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + # acl will not allow to delete title engineer +@@ -276,11 +287,11 @@ def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( + _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() + + +-def test_allow_modify_replace(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_allow_modify_replace(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can MODIFY:replace an attribute if we have correct add/delete rights. ++ + :id: 9148a234-7aa9-11e8-a1f1-8c16451d917b + :setup: server + :steps: +@@ -293,8 +304,8 @@ def test_allow_modify_replace(topo, _add_user, aci_of_user): + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ +- '(title=idiot))")(version 3.0; acl "$tet_thistest"; ' \ +- 'allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(title=idiot))")(version 3.0; acl "{}"; ' \ ++ 'allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() +@@ -305,11 +316,11 @@ def test_allow_modify_replace(topo, _add_user, aci_of_user): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() + + +-def test_allow_modify_delete(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_allow_modify_delete(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) +- "Valueacl Test $tet_thistest Don't Allow modify:replace because of lack of delete rights" ++ Don't Allow modify:replace because of lack of delete rights ++ + :id: 962842d2-7aa9-11e8-b39e-8c16451d917b + :setup: server + :steps: +@@ -322,8 +333,8 @@ def test_allow_modify_delete(topo, _add_user, aci_of_user): + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ +- '(version 3.0; acl "$tet_thistest"; allow (write) ' \ +- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(version 3.0; acl "{}"; allow (write) ' \ ++ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() +@@ -335,11 +346,11 @@ def test_allow_modify_delete(topo, _add_user, aci_of_user): + _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() + + +-def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:replace an attribute if we lack ++ + :id: 9b1e6afa-7aa9-11e8-ac5b-8c16451d917b + :setup: server + :steps: +@@ -352,8 +363,8 @@ def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user): + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ +- '(version 3.0; acl "$tet_thistest"; allow (write) ' \ +- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(version 3.0; acl "{}"; allow (write) ' \ ++ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() +@@ -365,13 +376,13 @@ def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user): + + + def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the +- attributes being added (or deleted)) ++ """Testing the targattrfilters keyword that allows access control based on the value of the ++ attributes being added (or deleted)) + Test that we can use MODIFY:delete to entirely remove an attribute if we have del rights + to all attr values negative case tested next. ++ + :id: a0c9e0c4-7aa9-11e8-8880-8c16451d917b + :setup: server + :steps: +@@ -384,8 +395,8 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ +- '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write)' \ +- ' (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(title=idiot))")(version 3.0; acl "{}"; allow (write)' \ ++ ' (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() +@@ -395,13 +406,13 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( + + + def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:delete to entirely remove an attribute if we have not del + rights to all attr values ++ + :id: a6862eaa-7aa9-11e8-8bf9-8c16451d917b + :setup: server + :steps: +@@ -414,8 +425,8 @@ def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ +- '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write) ' \ +- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ ++ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "sailor").add() +@@ -426,12 +437,12 @@ def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( + + + def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:replace to entirely remove an attribute if we have del rights to all attr values ++ + :id: ab04c7e8-7aa9-11e8-84db-8c16451d917b + :setup: server + :steps: +@@ -444,8 +455,8 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ +- '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write) ' \ +- '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) ++ '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ ++ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() +@@ -455,12 +466,12 @@ def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( + + + def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of ++ """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) +- Test we cannot DELETE an entry with attribute values we are not allowed delete, ++ Test we cannot DELETE an entry with attribute values we are not allowed delete ++ + :id: b525d94c-7aa9-11e8-8539-8c16451d917b + :setup: server + :steps: +@@ -474,7 +485,7 @@ def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ +- 'aci "$tet_thistest"; allow (delete) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) ++ 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddFREDWithRoot(topo, "engineer", "cool dude", "ANuj").create() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -484,12 +495,12 @@ def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete + + + def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add_and_delete( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, 
request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test we can DELETE an entry with attribute values we are allowed delete ++ + :id: ba138e54-7aa9-11e8-8037-8c16451d917b + :setup: server + :steps: +@@ -503,7 +514,7 @@ def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ +- 'aci "$tet_thistest"; allow (delete) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) ++ 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddFREDWithRoot(topo, "engineer", "cool dude", "scum").create() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -511,12 +522,12 @@ def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add + UserAccount(conn, FRED).delete() + + +-def test_allow_title(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_allow_title(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that if attr appears in targetattr and in targattrfilters then targattrfilters + applies--ie. targattrfilters is a refinement of targattrfilters. ++ + :id: beadf328-7aa9-11e8-bb08-8c16451d917b + :setup: server + :steps: +@@ -530,8 +541,8 @@ def test_allow_title(topo, _add_user, aci_of_user): + """ + ACI_BODY = '(targetattr="title")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ +- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (write) ' \ +- 'userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) ++ '(title=scum))")(version 3.0; aci "{}"; allow (write) ' \ ++ 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + _AddTitleWithRoot(topo, "cool dude").add() +@@ -541,11 +552,11 @@ def test_allow_title(topo, _add_user, aci_of_user): + _ModTitleArchitectJeffVedder(topo, "topdog", conn).add() + + +-def test_allow_to_modify(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++def test_allow_to_modify(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that I can have secretary in targetattr and title in targattrfilters. 
++ + :id: c32e4704-7aa9-11e8-951d-8c16451d917b + :setup: server + :steps: +@@ -559,8 +570,8 @@ def test_allow_to_modify(topo, _add_user, aci_of_user): + """ + ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ +- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (write)' \ +- ' userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) ++ '(title=scum))")(version 3.0; aci "{}"; allow (write)' \ ++ ' userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + _AddTitleWithRoot(topo, "cool dude").add() +@@ -571,11 +582,11 @@ def test_allow_to_modify(topo, _add_user, aci_of_user): + assert user.get_attr_val('secretary') + + +-def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of ++def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Selfwrite does not confer "write" on a targattrfilters atribute. ++ + :id: c7b9ec2e-7aa9-11e8-ba4a-8c16451d917b + :setup: server + :steps: +@@ -589,7 +600,7 @@ def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _ad + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ +- 'aci "$tet_thistest"; allow (selfwrite) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) ++ 'aci "{}"; allow (selfwrite) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will not allow to add selfwrite_does_not_confer_write_on_a_targattrfilters_atribute + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -598,12 +609,12 @@ def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _ad + + + def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of ++ """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Selfwrite continues to give rights to attr in targetattr list. 
++ + :id: cd287680-7aa9-11e8-a8e2-8c16451d917b + :setup: server + :steps: +@@ -617,8 +628,8 @@ def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( + """ + ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ +- '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (selfwrite) ' \ +- 'userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) ++ '(title=scum))")(version 3.0; aci "{}"; allow (selfwrite) ' \ ++ 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # selfwrite_continues_to_give_rights_to_attr_in_targetattr_list + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -627,12 +638,12 @@ def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( + + + def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can MODIFY:add an attribute value we are allowed to add with ldap:///anyone ++ + :id: d1e1d7ac-7aa9-11e8-b968-8c16451d917b + :setup: server + :steps: +@@ -645,7 +656,7 @@ def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ +- '(version 3.0; acl "$tet_thistest"; allow (write) userdn = "ldap:///anyone";)' ++ '(version 3.0; acl "{}"; allow (write) userdn = "ldap:///anyone";)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + # aci will allow to add title architect +@@ -653,12 +664,12 @@ def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + + +-def test_hierarchy(topo, _add_user, aci_of_user): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of ++def test_hierarchy(topo, _add_user, aci_of_user, request): ++ """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that with two targattrfilters in the hierarchy that the general one applies. +- This is the correct behaviour, even if it's a bit ++ This is the correct behaviour, even if it's a bit confusing ++ + :id: d7ae354a-7aa9-11e8-8b0d-8c16451d917b + :setup: server + :steps: +@@ -670,10 +681,10 @@ def test_hierarchy(topo, _add_user, aci_of_user): + 2. Operation should succeed + 3. 
Operation should succeed + """ +- ACI_BODY = '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ +- 'allow (write) (userdn = "ldap:///anyone") ;)' ++ ACI_BODY = '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ ++ 'allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + ACI_BODY1 = '(targattrfilters = "add=title:(title=architect)")(version 3.0; ' \ +- 'acl "$tet_thistest"; allow (write) (userdn = "ldap:///anyone") ;)' ++ 'acl "{}"; allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY1) + _AddTitleWithRoot(topo, "engineer").add() +@@ -686,12 +697,12 @@ def test_hierarchy(topo, _add_user, aci_of_user): + + + def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of the ++ """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can have targattrfilters and search permissions and that ldapmodify works as expected. ++ + :id: ddae7a22-7aa9-11e8-ad6b-8c16451d917b + :setup: server + :steps: +@@ -704,8 +715,8 @@ def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_exp + 3. Operation should succeed + """ + ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = "add=title:' \ +- '(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ +- 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)' ++ '(title=arch*)")(version 3.0; acl "{}"; ' \ ++ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) +@@ -713,12 +724,12 @@ def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_exp + + + def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected_two( +- topo, _add_user, aci_of_user ++ topo, _add_user, aci_of_user, request + ): +- """ +- Testing the targattrfilters keyword that allows access control based on the value of ++ """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that we can have targattrfilters and search permissions and that ldapsearch works as expected. ++ + :id: e25d116e-7aa9-11e8-81d8-8c16451d917b + :setup: server + :steps: +@@ -731,8 +742,8 @@ def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_exp + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = ' \ +- '"add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; allow ' \ +- '(write,read,search,compare) (userdn = "ldap:///anyone") ;)' ++ '"add=title:(title=arch*)")(version 3.0; acl "{}"; allow ' \ ++ '(write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = Anonymous(topo.standalone).bind() + user = UserAccount(conn, USER_DELADD) +diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py +index 02b73ee85..97908c31c 100644 +--- a/dirsrvtests/tests/suites/basic/basic_test.py ++++ b/dirsrvtests/tests/suites/basic/basic_test.py +@@ -7,10 +7,6 @@ + # --- END COPYRIGHT BLOCK --- + # + +-""" +- :Requirement: Basic Directory Server Operations +-""" +- + from subprocess import check_output, PIPE, run + from lib389 import DirSrv + from lib389.idm.user import UserAccounts +@@ -255,11 +251,11 @@ def test_basic_import_export(topology_st, import_example_ldif): + """ + + log.info('Running test_basic_import_export...') +- + # + # Test online/offline LDIF imports + # + topology_st.standalone.start() ++ # topology_st.standalone.config.set('nsslapd-errorlog-level', '1') + + # Generate a test ldif (50k entries) + log.info("Generating LDIF...") +@@ -267,6 +263,7 @@ def test_basic_import_export(topology_st, import_example_ldif): + import_ldif = ldif_dir + '/basic_import.ldif' + dbgen_users(topology_st.standalone, 50000, import_ldif, DEFAULT_SUFFIX) + ++ + # Online + log.info("Importing LDIF online...") + import_task = ImportTask(topology_st.standalone) +@@ -937,7 +934,7 @@ def test_mod_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr): + :id: c7831e04-f458-4e23-83c7-b6f66109f639 + :parametrized: yes + :setup: Standalone instance and we are using rootdse_attr fixture which +-adds nsslapd-return-default-opattr attr with value of one operation attribute. ++ adds nsslapd-return-default-opattr attr with value of one operation attribute. + + :steps: + 1. Make an ldapsearch for rootdse attribute +@@ -1003,7 +1000,7 @@ def test_basic_anonymous_search(topology_st, create_users): + @pytest.mark.bz915801 + def test_search_original_type(topology_st, create_users): + """Test ldapsearch returning original attributes +- using nsslapd-search-return-original-type-switch ++ using nsslapd-search-return-original-type-switch + + :id: d7831d04-f558-4e50-93c7-b6f77109f640 + :setup: Standalone instance +@@ -1095,7 +1092,7 @@ def test_critical_msg_on_empty_range_idl(topology_st): + :setup: Standalone instance + :steps: + 1. Create an index for internationalISDNNumber. (attribute chosen because it is +- unlikely that previous tests used it) ++ unlikely that previous tests used it) + 2. telephoneNumber being indexed by default create 20 users without telephoneNumber + 3. add a telephoneNumber value and delete it to trigger an empty index database + 4. Do a search that triggers a range lookup on empty telephoneNumber +@@ -1105,7 +1102,7 @@ def test_critical_msg_on_empty_range_idl(topology_st): + 2. This should pass + 3. This should pass + 4. This should pass on normal build but could abort a debug build +- 4. This should pass ++ 5. 
This should pass + """ + indexedAttr = 'internationalISDNNumber' + +@@ -1206,7 +1203,7 @@ def test_ldbm_modification_audit_log(topology_st): + assert conn.searchAuditLog('%s: %s' % (attr, VALUE)) + + +-@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.0.0'), ++@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'), + reason="This test is only required if perl is enabled, and requires root.") + def test_dscreate(request): + """Test that dscreate works, we need this for now until setup-ds.pl is +@@ -1356,7 +1353,7 @@ sample_entries = yes + return inst + + +-@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.2.0'), ++@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'), + reason="This test is only required with new admin cli, and requires root.") + @pytest.mark.bz1748016 + @pytest.mark.ds50581 +@@ -1367,7 +1364,7 @@ def test_dscreate_ldapi(dscreate_long_instance): + :id: 5d72d955-aff8-4741-8c9a-32c1c707cf1f + :setup: None + :steps: +- 1. create an instance with a long serverId name, that open a ldapi connection ++ 1. Ccreate an instance with a long serverId name, that open a ldapi connection + 2. Connect with ldapi, that hit 50581 and crash the instance + :expectedresults: + 1. Should succeeds +@@ -1378,7 +1375,7 @@ def test_dscreate_ldapi(dscreate_long_instance): + log.info(root_dse.get_supported_ctrls()) + + +-@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.2.0'), ++@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'), + reason="This test is only required with new admin cli, and requires root.") + @pytest.mark.bz1715406 + @pytest.mark.ds50923 +diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +index 94686f5f2..d67bcb13e 100644 +--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py ++++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +@@ -1,25 +1,26 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2015 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). + # See LICENSE for details. 
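
The basic_test.py import/export hunk above drives an online LDIF import by generating entries with dbgen_users and then queuing an ImportTask. A minimal sketch of that flow, assuming the lib389 helpers behave as they are used in this suite (the instance handle `inst` and the LDIF path are hypothetical):

    from lib389.tasks import ImportTask
    from lib389.dbgen import dbgen_users
    from lib389._constants import DEFAULT_SUFFIX

    # Generate a small test LDIF under the instance's ldif directory
    # (assumes DirSrv.get_ldif_dir() as used by this suite).
    ldif_file = inst.get_ldif_dir() + '/example_import.ldif'
    dbgen_users(inst, 1000, ldif_file, DEFAULT_SUFFIX)

    # Queue the import as a cn=tasks entry and block until the server
    # reports the task as finished.
    task = ImportTask(inst)
    task.import_suffix_from_ldif(ldiffile=ldif_file, suffix=DEFAULT_SUFFIX)
    task.wait()
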
+ # --- END COPYRIGHT BLOCK --- + # ++from decimal import * + import os + import logging + import pytest +-import subprocess + from lib389._mapped_object import DSLdapObject + from lib389.topologies import topology_st + from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions + from lib389.idm.user import UserAccounts + from lib389.idm.group import Groups + from lib389.idm.organizationalunit import OrganizationalUnits +-from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, DN_CONFIG, HOST_STANDALONE, PORT_STANDALONE, DN_DM, PASSWORD +-from lib389.utils import ds_is_older ++from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL ++from lib389.utils import ds_is_older, ds_is_newer + import ldap + import glob ++import re + + pytestmark = pytest.mark.tier1 + +@@ -30,7 +31,6 @@ PLUGIN_TIMESTAMP = 'nsslapd-logging-hr-timestamps-enabled' + PLUGIN_LOGGING = 'nsslapd-plugin-logging' + USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX + +- + def add_users(topology_st, users_num): + users = UserAccounts(topology_st, DEFAULT_SUFFIX) + log.info('Adding %d users' % users_num) +@@ -161,6 +161,20 @@ def clean_access_logs(topology_st, request): + + return clean_access_logs + ++@pytest.fixture(scope="function") ++def remove_users(topology_st, request): ++ def _remove_users(): ++ topo = topology_st.standalone ++ users = UserAccounts(topo, DEFAULT_SUFFIX) ++ entries = users.list() ++ assert len(entries) > 0 ++ ++ log.info("Removing all added users") ++ for entry in entries: ++ delete_obj(entry) ++ ++ request.addfinalizer(_remove_users) ++ + + def set_audit_log_config_values(topology_st, request, enabled, logsize): + topo = topology_st.standalone +@@ -181,6 +195,17 @@ def set_audit_log_config_values(topology_st, request, enabled, logsize): + def set_audit_log_config_values_to_rotate(topology_st, request): + set_audit_log_config_values(topology_st, request, 'on', '1') + ++@pytest.fixture(scope="function") ++def disable_access_log_buffering(topology_st, request): ++ log.info('Disable access log buffering') ++ topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') ++ def fin(): ++ log.info('Enable access log buffering') ++ topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'on') ++ ++ request.addfinalizer(fin) ++ ++ return disable_access_log_buffering + + @pytest.mark.bz1273549 + def test_check_default(topology_st): +@@ -226,11 +251,11 @@ def test_plugin_set_invalid(topology_st): + + log.info('test_plugin_set_invalid - Expect to fail with junk value') + with pytest.raises(ldap.OPERATIONS_ERROR): +- result = topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK') ++ topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK') + + + @pytest.mark.bz1273549 +-def test_log_plugin_on(topology_st): ++def test_log_plugin_on(topology_st, remove_users): + """Check access logs for millisecond, when + nsslapd-logging-hr-timestamps-enabled=ON + +@@ -266,7 +291,7 @@ def test_log_plugin_on(topology_st): + + + @pytest.mark.bz1273549 +-def test_log_plugin_off(topology_st): ++def test_log_plugin_off(topology_st, remove_users): + """Milliseconds should be absent from access logs when + nsslapd-logging-hr-timestamps-enabled=OFF + +@@ -303,6 +328,7 @@ def test_log_plugin_off(topology_st): + topology_st.standalone.deleteAccessLogs() + + # Now generate some fresh logs ++ add_users(topology_st.standalone, 10) + search_users(topology_st.standalone) + + log.info('Restart the server to flush the logs') +@@ -317,8 +343,9 @@ def 
test_log_plugin_off(topology_st): + @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") + @pytest.mark.bz1358706 + @pytest.mark.ds49029 +-def test_internal_log_server_level_0(topology_st, clean_access_logs): ++def test_internal_log_server_level_0(topology_st, clean_access_logs, disable_access_log_buffering): + """Tests server-initiated internal operations ++ + :id: 798d06fe-92e8-4648-af66-21349c20638e + :setup: Standalone instance + :steps: +@@ -362,22 +389,23 @@ def test_internal_log_server_level_0(topology_st, clean_access_logs): + @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") + @pytest.mark.bz1358706 + @pytest.mark.ds49029 +-def test_internal_log_server_level_4(topology_st, clean_access_logs): ++def test_internal_log_server_level_4(topology_st, clean_access_logs, disable_access_log_buffering): + """Tests server-initiated internal operations ++ + :id: a3500e47-d941-4575-b399-e3f4b49bc4b6 + :setup: Standalone instance + :steps: + 1. Set nsslapd-plugin-logging to on + 2. Configure access log level to only 4 + 3. Check the access logs, it should contain info about MOD operation of cn=config and other +- internal operations should have the conn field set to Internal +- and all values inside parenthesis set to 0. ++ internal operations should have the conn field set to Internal ++ and all values inside parenthesis set to 0. + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Access log should contain correct internal log formats with cn=config modification: +- "(Internal) op=2(1)(1)" +- "conn=Internal(0)" ++ "(Internal) op=2(1)(1)" ++ "conn=Internal(0)" + """ + + topo = topology_st.standalone +@@ -398,8 +426,8 @@ def test_internal_log_server_level_4(topology_st, clean_access_logs): + log.info("Check if access log contains internal MOD operation in correct format") + # (Internal) op=2(2)(1) SRCH base="cn=config + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') +- # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries=1 +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') ++ # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries= ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 +@@ -411,8 +439,9 @@ def test_internal_log_server_level_4(topology_st, clean_access_logs): + @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") + @pytest.mark.bz1358706 + @pytest.mark.ds49029 +-def test_internal_log_level_260(topology_st, add_user_log_level_260): ++def test_internal_log_level_260(topology_st, add_user_log_level_260, disable_access_log_buffering): + """Tests client initiated operations when automember plugin is enabled ++ + :id: e68a303e-c037-42b2-a5a0-fbea27c338a9 + :setup: Standalone instance with internal operation + logging on and nsslapd-plugin-logging to on +@@ -465,9 +494,10 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') +- # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, 
ou=branch1,dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' +- 'ou=branch1,dc=example,dc=com".*') ++ if ds_is_older(('1.4.3.9', '1.4.4.3')): ++ # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' ++ 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=109 +@@ -476,9 +506,10 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') +- # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' +- 'dc=example,dc=com".*') ++ if ds_is_older(('1.4.3.9', '1.4.4.3')): ++ # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' ++ 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 +@@ -492,8 +523,9 @@ def test_internal_log_level_260(topology_st, add_user_log_level_260): + @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") + @pytest.mark.bz1358706 + @pytest.mark.ds49029 +-def test_internal_log_level_131076(topology_st, add_user_log_level_131076): ++def test_internal_log_level_131076(topology_st, add_user_log_level_131076, disable_access_log_buffering): + """Tests client-initiated operations while referential integrity plugin is enabled ++ + :id: 44836ac9-dabd-4a8c-abd5-ecd7c2509739 + :setup: Standalone instance + Configure access log level to - 131072 + 4 +@@ -547,9 +579,10 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076): + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') +- # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' +- 'ou=branch1,dc=example,dc=com".*') ++ if ds_is_older(('1.4.3.9', '1.4.4.3')): ++ # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' ++ 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=109 +@@ -558,9 +591,10 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076): + log.info("Check the access logs for DEL operation of the user") + # 
op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') +- # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' +- 'dc=example,dc=com".*') ++ if ds_is_older(('1.4.3.9', '1.4.4.3')): ++ # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' ++ 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 +@@ -574,8 +608,9 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076): + @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") + @pytest.mark.bz1358706 + @pytest.mark.ds49029 +-def test_internal_log_level_516(topology_st, add_user_log_level_516): ++def test_internal_log_level_516(topology_st, add_user_log_level_516, disable_access_log_buffering): + """Tests client initiated operations when referential integrity plugin is enabled ++ + :id: bee1d681-763d-4fa5-aca2-569cf93f8b71 + :setup: Standalone instance + Configure access log level to - 512+4 +@@ -624,34 +659,34 @@ def test_internal_log_level_516(topology_st, add_user_log_level_516): + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') + # (Internal) op=10(1)(1) RESULT err=0 tag=48 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') +- # op=10 RESULT err=0 tag=105 +- assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') + + log.info("Check the access logs for MOD operation of the user") + # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') +- # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' +- 'ou=branch1,dc=example,dc=com".*') +- # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,' +- 'ou=branch1,dc=example,dc=com".*') ++ if ds_is_older(('1.4.3.9', '1.4.4.3')): ++ # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' ++ 'ou=branch1,dc=example,dc=com".*') ++ # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,' ++ 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') +- # op=12 RESULT err=0 tag=109 +- assert not 
topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') ++ # op=12 RESULT err=0 tag=48 ++ assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=48.*') + + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') +- # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' +- 'dc=example,dc=com".*') +- # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com" +- assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,' +- 'dc=example,dc=com".*') ++ if ds_is_older(('1.4.3.9', '1.4.4.3')): ++ # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' ++ 'dc=example,dc=com".*') ++ # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com" ++ assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,' ++ 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 +@@ -698,14 +733,13 @@ def test_access_log_truncated_search_message(topology_st, clean_access_logs): + assert not topo.ds_access_log.match(r'.*cn500.*') + + +- ++@pytest.mark.skipif(ds_is_newer("1.4.3"), reason="rsearch was removed") + @pytest.mark.xfail(ds_is_older('1.4.2.0'), reason="May fail because of bug 1732053") + @pytest.mark.bz1732053 + @pytest.mark.ds50510 + def test_etime_at_border_of_second(topology_st, clean_access_logs): + topo = topology_st.standalone + +- + prog = os.path.join(topo.ds_paths.bin_dir, 'rsearch') + + cmd = [prog] +@@ -741,11 +775,167 @@ def test_etime_at_border_of_second(topology_st, clean_access_logs): + assert not invalid_etime + + ++@pytest.mark.skipif(ds_is_older('1.3.10.1', '1.4.1'), reason="Fail because of bug 1749236") ++@pytest.mark.bz1749236 ++def test_etime_order_of_magnitude(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): ++ """Test that the etime reported in the access log has a correct order of magnitude ++ ++ :id: e815cfa0-8136-4932-b50f-c3dfac34b0e6 ++ :setup: Standalone instance ++ :steps: ++ 1. Unset log buffering for the access log ++ 2. Delete potential existing access logs ++ 3. Add users ++ 4. Search users ++ 5. Restart the server to flush the logs ++ 6. Parse the access log looking for the SRCH operation log ++ 7. From the SRCH string get the start time and op number of the operation ++ 8. From the op num find the associated RESULT string in the access log ++ 9. From the RESULT string get the end time and the etime for the operation ++ 10. Calculate the ratio between the calculated elapsed time (end time - start time) and the logged etime ++ :expectedresults: ++ 1. access log buffering is off ++ 2. Previously existing access logs are deleted ++ 3. Users are successfully added ++ 4. Search operation is successful ++ 5. Server is restarted and logs are flushed ++ 6. SRCH operation log string is catched ++ 7. start time and op number are collected ++ 8. 
RESULT string is catched from the access log ++ 9. end time and etime are collected ++ 10. ratio between calculated elapsed time and logged etime is less or equal to 1 ++ """ ++ ++ DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) ++ ++ log.info('add_users') ++ add_users(topology_st.standalone, 30) ++ ++ log.info ('search users') ++ search_users(topology_st.standalone) ++ ++ log.info('parse the access logs to get the SRCH string') ++ # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com ++ search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] ++ assert len(search_str) > 0 ++ ++ # the search_str returned looks like : ++ # [23/Apr/2020:06:06:14.360857624 -0400] conn=1 op=93 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" ++ ++ log.info('get the operation start time from the SRCH string') ++ # Here we are getting the sec.nanosec part of the date, '14.360857624' in the example above ++ start_time = (search_str.split()[0]).split(':')[3] ++ ++ log.info('get the OP number from the SRCH string') ++ # Here we are getting the op number, 'op=93' in the above example ++ op_num = search_str.split()[3] ++ ++ log.info('get the RESULT string matching the SRCH OP number') ++ # Here we are looking at the RESULT string for the above search op, 'op=93' in this example ++ result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] ++ assert len(result_str) > 0 ++ ++ # The result_str returned looks like : ++ # For ds older than 1.4.3.8: [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017 ++ # For ds newer than 1.4.3.8: [21/Oct/2020:09:27:50.095209871 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000412584 optime=0.005428971 etime=0.005836077 ++ ++ log.info('get the operation end time from the RESULT string') ++ # Here we are getting the sec.nanosec part of the date, '14.366429900' in the above example ++ end_time = (result_str.split()[0]).split(':')[3] ++ ++ log.info('get the logged etime for the operation from the RESULT string') ++ # Here we are getting the etime value, '0.005723017' in the example above ++ if ds_is_older('1.4.3.8'): ++ etime = result_str.split()[8].split('=')[1][:-3] ++ else: ++ etime = result_str.split()[10].split('=')[1][:-3] ++ ++ log.info('Calculate the ratio between logged etime for the operation and elapsed time from its start time to its end time - should be around 1') ++ etime_ratio = (Decimal(end_time) - Decimal(start_time)) // Decimal(etime) ++ assert etime_ratio <= 1 ++ ++ ++@pytest.mark.skipif(ds_is_older('1.4.3.8'), reason="Fail because of bug 1850275") ++@pytest.mark.bz1850275 ++def test_optime_and_wtime_keywords(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): ++ """Test that the new optime and wtime keywords are present in the access log and have correct values ++ ++ :id: dfb4a49d-1cfc-400e-ba43-c107f58d62cf ++ :setup: Standalone instance ++ :steps: ++ 1. Unset log buffering for the access log ++ 2. Delete potential existing access logs ++ 3. Add users ++ 4. Search users ++ 5. Parse the access log looking for the SRCH operation log ++ 6. From the SRCH string get the op number of the operation ++ 7. From the op num find the associated RESULT string in the access log ++ 8. 
Search for the wtime optime keywords in the RESULT string ++ 9. From the RESULT string get the wtime, optime and etime values for the operation ++ 10. Check that optime + wtime is approximatively etime ++ :expectedresults: ++ 1. access log buffering is off ++ 2. Previously existing access logs are deleted ++ 3. Users are successfully added ++ 4. Search operation is successful ++ 5. SRCH operation log string is catched ++ 6. op number is collected ++ 7. RESULT string is catched from the access log ++ 8. wtime and optime keywords are collected ++ 9. wtime, optime and etime values are collected ++ 10. (optime + wtime) =~ etime ++ """ ++ ++ log.info('add_users') ++ add_users(topology_st.standalone, 30) ++ ++ log.info ('search users') ++ search_users(topology_st.standalone) ++ ++ log.info('parse the access logs to get the SRCH string') ++ # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com ++ search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] ++ assert len(search_str) > 0 ++ ++ # the search_str returned looks like : ++ # [22/Oct/2020:09:47:11.951316798 -0400] conn=1 op=96 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" ++ ++ log.info('get the OP number from the SRCH string') ++ # Here we are getting the op number, 'op=96' in the above example ++ op_num = search_str.split()[3] ++ ++ log.info('get the RESULT string matching the SRCH op number') ++ # Here we are looking at the RESULT string for the above search op, 'op=96' in this example ++ result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] ++ assert len(result_str) > 0 ++ ++ # The result_str returned looks like : ++ # [22/Oct/2020:09:47:11.963276018 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000180294 optime=0.011966632 etime=0.012141311 ++ log.info('Search for the wtime keyword in the RESULT string') ++ assert re.search('wtime', result_str) ++ ++ log.info('get the wtime value from the RESULT string') ++ wtime_value = result_str.split()[8].split('=')[1][:-3] ++ ++ log.info('Search for the optime keyword in the RESULT string') ++ assert re.search('optime', result_str) ++ ++ log.info('get the optime value from the RESULT string') ++ optime_value = result_str.split()[9].split('=')[1][:-3] ++ ++ log.info('get the etime value from the RESULT string') ++ etime_value = result_str.split()[10].split('=')[1][:-3] ++ ++ log.info('Check that (wtime + optime) is approximately equal to etime i.e. their ratio is 1') ++ etime_ratio = (Decimal(wtime_value) + Decimal(optime_value)) // Decimal(etime_value) ++ assert etime_ratio == 1 ++ ++ + @pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="May fail because of bug 1662461") + @pytest.mark.bz1662461 + @pytest.mark.ds50428 + @pytest.mark.ds49969 +-def test_log_base_dn_when_invalid_attr_request(topology_st): ++def test_log_base_dn_when_invalid_attr_request(topology_st, disable_access_log_buffering): + """Test that DS correctly logs the base dn when a search with invalid attribute request is performed + + :id: 859de962-c261-4ffb-8705-97bceab1ba2c +@@ -753,7 +943,7 @@ def test_log_base_dn_when_invalid_attr_request(topology_st): + :steps: + 1. Disable the accesslog-logbuffering config parameter + 2. Delete the previous access log +- 3. 
Perform a base search on the DEFAULT_SUFFIX, using invalid "" "" attribute request ++ 3. Perform a base search on the DEFAULT_SUFFIX, using ten empty attribute requests + 4. Check the access log file for 'invalid attribute request' + 5. Check the access log file for 'SRCH base="\(null\)"' + 6. Check the access log file for 'SRCH base="DEFAULT_SUFFIX"' +@@ -768,17 +958,14 @@ def test_log_base_dn_when_invalid_attr_request(topology_st): + + entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) + +- log.info('Set accesslog logbuffering to off to get the log in real time') +- topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') +- + log.info('delete the previous access logs to get a fresh new one') + topology_st.standalone.deleteAccessLogs() + + log.info("Search the default suffix, with invalid '\"\" \"\"' attribute request") +- log.info("A Protocol error exception should be raised, see https://pagure.io/389-ds-base/issue/49969") +- # A ldap.PROTOCOL_ERROR exception is expected ++ log.info("A Protocol error exception should be raised, see https://github.com/389ds/389-ds-base/issues/3028") ++ # A ldap.PROTOCOL_ERROR exception is expected after 10 empty values + with pytest.raises(ldap.PROTOCOL_ERROR): +- assert entry.get_attrs_vals_utf8(['', '']) ++ assert entry.get_attrs_vals_utf8(['', '', '', '', '', '', '', '', '', '', '']) + + # Search for appropriate messages in the access log + log.info('Check the access logs for correct messages') +diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py +index db2be9f67..c882bea5f 100644 +--- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py ++++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py +@@ -11,6 +11,7 @@ from lib389.tasks import * + from lib389.utils import * + from lib389.topologies import topology_st + from lib389.idm.user import UserAccounts ++from lib389.idm.domain import Domain + + from lib389._constants import DN_DM, DEFAULT_SUFFIX, DN_CONFIG, PASSWORD + +@@ -26,15 +27,15 @@ TEST_USER_PWD = 'all_attrs_test' + TEST_PARAMS = [(DN_ROOT, False, [ + 'aci', 'createTimestamp', 'creatorsName', + 'modifiersName', 'modifyTimestamp', 'namingContexts', +- 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry', ++ 'nsBackendSuffix', 'subschemaSubentry', + 'supportedControl', 'supportedExtension', + 'supportedFeatures', 'supportedLDAPVersion', + 'supportedSASLMechanisms', 'vendorName', 'vendorVersion' +-]), ++ ]), + (DN_ROOT, True, [ + 'createTimestamp', 'creatorsName', + 'modifiersName', 'modifyTimestamp', 'namingContexts', +- 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry', ++ 'nsBackendSuffix', 'subschemaSubentry', + 'supportedControl', 'supportedExtension', + 'supportedFeatures', 'supportedLDAPVersion', + 'supportedSASLMechanisms', 'vendorName', 'vendorVersion' +@@ -80,6 +81,18 @@ def create_user(topology_st): + 'homeDirectory': '/home/test' + }) + ++ # Add anonymous access aci ++ ACI_TARGET = "(targetattr != \"userpassword || aci\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ++ ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ++ ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ suffix = Domain(topology_st.standalone, DEFAULT_SUFFIX) ++ try: ++ suffix.add('aci', ANON_ACI) ++ except ldap.TYPE_OR_VALUE_EXISTS: ++ pass ++ ++ + @pytest.fixture(scope="module") + def user_aci(topology_st): + """Don't allow modifiersName 
attribute for the test user +@@ -156,7 +169,9 @@ def test_search_basic(topology_st, create_user, user_aci, add_attr, + entries = topology_st.standalone.search_s(search_suffix, ldap.SCOPE_BASE, + '(objectclass=*)', + search_filter) +- found_attrs = entries[0].data.keys() ++ found_attrs = set(entries[0].data.keys()) ++ if search_suffix == DN_ROOT and "nsUniqueId" in found_attrs: ++ found_attrs.remove("nsUniqueId") + + if add_attr == '*': + assert set(expected_attrs) - set(found_attrs) == set() +diff --git a/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py b/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py +new file mode 100644 +index 000000000..387c313ad +--- /dev/null ++++ b/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py +@@ -0,0 +1,65 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import ldap ++import logging ++import pytest ++import os ++from lib389._constants import * ++from lib389.topologies import topology_st as topo ++from lib389.mappingTree import MappingTrees ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++ ++def test_invalid_mt(topo): ++ """Test that you can not add a new suffix/mapping tree ++ that does not already have the backend entry created. ++ ++ :id: caabd407-f541-4695-b13f-8f92af1112a0 ++ :setup: Standalone Instance ++ :steps: ++ 1. Create a new suffix that specifies an existing backend which has a ++ different suffix. ++ 2. Create a suffix that has no backend entry at all. ++ :expectedresults: ++ 1. Should fail with UNWILLING_TO_PERFORM ++ 1. Should fail with UNWILLING_TO_PERFORM ++ """ ++ ++ bad_suffix = 'dc=does,dc=not,dc=exist' ++ mts = MappingTrees(topo.standalone) ++ ++ properties = { ++ 'cn': bad_suffix, ++ 'nsslapd-state': 'backend', ++ 'nsslapd-backend': 'userroot', ++ } ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ mts.create(properties=properties) ++ ++ properties = { ++ 'cn': bad_suffix, ++ 'nsslapd-state': 'backend', ++ 'nsslapd-backend': 'notCreatedRoot', ++ } ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ mts.create(properties=properties) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) ++ +diff --git a/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py +index 34a2de2ad..c25d89cb0 100644 +--- a/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py ++++ b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py +@@ -6,6 +6,8 @@ from lib389.topologies import topology_m1 as topo + from lib389.backend import Backends + from lib389.encrypted_attributes import EncryptedAttrs + ++pytestmark = pytest.mark.tier1 ++ + DEBUGGING = os.getenv("DEBUGGING", default=False) + if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +@@ -26,13 +28,13 @@ def test_be_delete(topo): + :steps: + 1. Create second backend/suffix + 2. Add an encrypted attribute to the default suffix +- 2. Delete default suffix +- 3. Check the nsslapd-defaultnamingcontext is updated +- 4. Delete the last backend +- 5. 
Check the namingcontext has not changed +- 6. Add new backend +- 7. Set default naming context +- 8. Verify the naming context is correct ++ 3. Delete default suffix ++ 4. Check the nsslapd-defaultnamingcontext is updated ++ 5. Delete the last backend ++ 6. Check the namingcontext has not changed ++ 7. Add new backend ++ 8. Set default naming context ++ 9. Verify the naming context is correct + :expectedresults: + 1. Success + 2. Success +@@ -42,6 +44,7 @@ def test_be_delete(topo): + 6. Success + 7. Success + 8. Success ++ 9. Success + """ + + inst = topo.ms["master1"] +diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py +index b37eff70f..882faf513 100644 +--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py ++++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py +@@ -99,6 +99,7 @@ def test_pwd_reset(topology_st, create_user): + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) ++ time.sleep(.5) + + # Check that pwdReset is TRUE + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' +@@ -106,6 +107,7 @@ def test_pwd_reset(topology_st, create_user): + # Bind as user and change its own password + our_user.rebind(PASSWORD) + our_user.replace('userpassword', PASSWORD) ++ time.sleep(.5) + + # Check that pwdReset is FALSE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) +@@ -114,6 +116,9 @@ def test_pwd_reset(topology_st, create_user): + # Reset password policy config + topology_st.standalone.config.replace('passwordMustChange', 'off') + ++ # Reset user's password ++ our_user.replace('userpassword', TEST_USER_PWD) ++ + + @pytest.mark.parametrize('subtree_pwchange,user_pwchange,exception', + [('on', 'off', ldap.UNWILLING_TO_PERFORM), +@@ -171,7 +176,7 @@ def test_change_pwd(topology_st, create_user, password_policy, + user.reset_password('new_pass') + except ldap.LDAPError as e: + log.error('Failed to change userpassword for {}: error {}'.format( +- TEST_USER_DN, e.message['info'])) ++ TEST_USER_DN, e.args[0['info']])) + raise e + finally: + log.info('Bind as DM') +@@ -245,7 +250,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy): + user.reset_password(TEST_USER_PWD) + except ldap.LDAPError as e: + log.error('Failed to change userpassword for {}: error {}'.format( +- TEST_USER_DN, e.message['info'])) ++ TEST_USER_DN, e.args[0]['info'])) + raise e + finally: + log.info('Bind as DM') +diff --git a/dirsrvtests/tests/suites/replication/changelog_test.py b/dirsrvtests/tests/suites/replication/changelog_test.py +index e395f0e7c..66599286f 100644 +--- a/dirsrvtests/tests/suites/replication/changelog_test.py ++++ b/dirsrvtests/tests/suites/replication/changelog_test.py +@@ -367,7 +367,7 @@ def test_dsconf_dump_changelog_files_removed(topo): + # primary condition before executing the core goal of this case : management of generated files. 
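
The pwdPolicy_attribute_test.py hunks above switch the error reporting from the Python 2-era e.message attribute to e.args[0], which is where python-ldap 3.x delivers the server's result as a dict. A minimal sketch of that pattern, with a hypothetical URI and bind DN:

    import ldap

    conn = ldap.initialize('ldap://localhost:389')  # hypothetical URI
    try:
        conn.simple_bind_s('uid=missing,dc=example,dc=com', 'wrong-password')
    except ldap.LDAPError as e:
        # python-ldap 3.x passes the LDAP result (desc, info, ...) as a dict
        # in e.args[0]; e.message no longer exists on Python 3.
        desc = e.args[0].get('desc', '')
        info = e.args[0].get('info', '')
        print('Bind failed: {} ({})'.format(desc, info))
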
+ + log.info("Use dsconf dump-changelog with invalid parameters") +- cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', 'badpasswd', 'replication', 'dump-changelog'] ++ cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', 'badpasswd', 'replication', 'dump-changelog'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() +@@ -377,7 +377,7 @@ def test_dsconf_dump_changelog_files_removed(topo): + # Now the core goal of the test case + # Using dsconf replication changelog without -l option + log.info('Use dsconf replication changelog without -l option: no generated ldif files should be present in %s ' % changelog_dir) +- cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog'] ++ cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() +@@ -396,7 +396,7 @@ def test_dsconf_dump_changelog_files_removed(topo): + + # Using dsconf replication changelog without -l option + log.info('Use dsconf replication changelog with -l option: generated ldif files should be kept in %s ' % changelog_dir) +- cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog', '-l'] ++ cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog', '-l'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() +diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py +index 48d0067db..ea3eacc48 100644 +--- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py ++++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2018 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -117,7 +117,7 @@ def _test_base(topology): + M1 = topology.ms["master1"] + + conts = nsContainers(M1, SUFFIX) +- base_m2 = conts.create(properties={'cn': 'test_container'}) ++ base_m2 = conts.ensure_state(properties={'cn': 'test_container'}) + + for inst in topology: + inst.config.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error') +diff --git a/dirsrvtests/tests/suites/replication/rfc2307compat.py b/dirsrvtests/tests/suites/replication/rfc2307compat.py +new file mode 100644 +index 000000000..ec98e9dac +--- /dev/null ++++ b/dirsrvtests/tests/suites/replication/rfc2307compat.py +@@ -0,0 +1,174 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 Red Hat, Inc. ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import pytest ++from lib389.replica import Replicas ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_m2 as topo_m2 ++from . 
import get_repl_entries ++from lib389.idm.user import UserAccount ++from lib389.replica import ReplicationManager ++from lib389._constants import * ++ ++pytestmark = pytest.mark.tier0 ++ ++TEST_ENTRY_NAME = 'mmrepl_test' ++TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) ++NEW_SUFFIX_NAME = 'test_repl' ++NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) ++NEW_BACKEND = 'repl_base' ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++pytest.mark.skipif(not os.environ.get('UNSAFE_ACK', False), reason="UNSAFE tests may damage system configuration.") ++def test_rfc2307compat(topo_m2): ++ """ Test to verify if 10rfc2307compat.ldif does not prevent replication of schema ++ - Create 2 masters and a test entry ++ - Move 10rfc2307compat.ldif to be private to M1 ++ - Move 10rfc2307.ldif to be private to M2 ++ - Add 'objectCategory' to the schema of M1 ++ - Force a replication session ++ - Check 'objectCategory' on M1 and M2 ++ """ ++ m1 = topo_m2.ms["master1"] ++ m2 = topo_m2.ms["master2"] ++ ++ m1.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.REPLICA)) ++ m2.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.REPLICA)) ++ ++ m1.add_s(Entry(( ++ TEST_ENTRY_DN, { ++ "objectClass": "top", ++ "objectClass": "extensibleObject", ++ 'uid': TEST_ENTRY_NAME, ++ 'cn': TEST_ENTRY_NAME, ++ 'sn': TEST_ENTRY_NAME, ++ } ++ ))) ++ ++ entries = get_repl_entries(topo_m2, TEST_ENTRY_NAME, ["uid"]) ++ assert all(entries), "Entry {} wasn't replicated successfully".format(TEST_ENTRY_DN) ++ ++ # Clean the old locations (if any) ++ m1_temp_schema = os.path.join(m1.get_config_dir(), 'schema') ++ m2_temp_schema = os.path.join(m2.get_config_dir(), 'schema') ++ m1_schema = os.path.join(m1.get_data_dir(), 'dirsrv/schema') ++ m1_opt_schema = os.path.join(m1.get_data_dir(), 'dirsrv/data') ++ m1_temp_backup = os.path.join(m1.get_tmp_dir(), 'schema') ++ ++ # Does the system schema exist? ++ if os.path.islink(m1_schema): ++ # Then we need to put the m1 schema back. ++ os.unlink(m1_schema) ++ shutil.copytree(m1_temp_backup, m1_schema) ++ if not os.path.exists(m1_temp_backup): ++ shutil.copytree(m1_schema, m1_temp_backup) ++ ++ shutil.rmtree(m1_temp_schema, ignore_errors=True) ++ shutil.rmtree(m2_temp_schema, ignore_errors=True) ++ ++ # Build a new copy ++ shutil.copytree(m1_schema, m1_temp_schema) ++ shutil.copytree(m1_schema, m2_temp_schema) ++ # Ensure 99user.ldif exists ++ with open(os.path.join(m1_temp_schema, '99user.ldif'), 'w') as f: ++ f.write('dn: cn=schema') ++ ++ with open(os.path.join(m2_temp_schema, '99user.ldif'), 'w') as f: ++ f.write('dn: cn=schema') ++ ++ # m1 has compat, m2 has legacy. ++ os.unlink(os.path.join(m2_temp_schema, '10rfc2307compat.ldif')) ++ shutil.copy(os.path.join(m1_opt_schema, '10rfc2307.ldif'), m2_temp_schema) ++ ++ # Configure the instances ++ # m1.config.replace('nsslapd-schemadir', m1_temp_schema) ++ # m2.config.replace('nsslapd-schemadir', m2_temp_schema) ++ ++ # Now mark the system schema as empty. 
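
The conflict_resolve_test.py hunk earlier in this patch replaces conts.create() with conts.ensure_state() so the shared test container no longer trips ldap.ALREADY_EXISTS when the suite is re-run against a dirty instance. A minimal sketch of that idempotent-fixture pattern, assuming nsContainers is importable from lib389.idm.nscontainer and `inst` is a connected instance:

    from lib389.idm.nscontainer import nsContainers
    from lib389._constants import DEFAULT_SUFFIX

    conts = nsContainers(inst, DEFAULT_SUFFIX)
    # ensure_state() creates the container if it is missing and otherwise
    # reuses the existing entry, so repeated runs do not fail with
    # ALREADY_EXISTS the way create() does.
    base = conts.ensure_state(properties={'cn': 'test_container'})
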
++ shutil.rmtree(m1_schema) ++ os.symlink('/var/lib/empty', m1_schema) ++ ++ print("SETUP COMPLETE -->") ++ ++ # Stop all instances ++ m1.stop() ++ m2.stop() ++ ++ # udpate the schema on M1 to tag a schemacsn ++ m1.start() ++ objectcategory_attr = '( NAME \'objectCategory\' DESC \'test of objectCategory\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' ++ m1.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) ++ ++ # Now start M2 and trigger a replication M1->M2 ++ m2.start() ++ m1.modify_s(TEST_ENTRY_DN, [(ldap.MOD_ADD, 'cn', [ensure_bytes('value_m1')])]) ++ ++ # Now check that objectCategory is in both schema ++ time.sleep(10) ++ ents = m1.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) ++ for value in ents[0].getValues('attributetypes'): ++ if ensure_bytes('objectCategory') in value: ++ log.info("M1: " + str(value)) ++ break ++ assert ensure_bytes('objectCategory') in value ++ ++ ents = m2.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) ++ for value in ents[0].getValues('attributetypes'): ++ if ensure_bytes('objectCategory') in value: ++ log.info("M2: " + str(value)) ++ break ++ assert ensure_bytes('objectCategory') in value ++ ++ # Stop m2 ++ m2.stop() ++ ++ # "Update" it's schema, ++ os.unlink(os.path.join(m2_temp_schema, '10rfc2307.ldif')) ++ shutil.copy(os.path.join(m1_temp_backup, '10rfc2307compat.ldif'), m2_temp_schema) ++ ++ # Add some more to m1 ++ objectcategory_attr = '( NAME \'objectCategoryX\' DESC \'test of objectCategoryX\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' ++ m1.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) ++ ++ # Start m2. ++ m2.start() ++ m1.modify_s(TEST_ENTRY_DN, [(ldap.MOD_ADD, 'cn', [ensure_bytes('value_m2')])]) ++ ++ time.sleep(10) ++ ents = m1.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) ++ for value in ents[0].getValues('attributetypes'): ++ if ensure_bytes('objectCategoryX') in value: ++ log.info("M1: " + str(value)) ++ break ++ assert ensure_bytes('objectCategoryX') in value ++ ++ ents = m2.search_s("cn=schema", ldap.SCOPE_SUBTREE, 'objectclass=*',['attributetypes']) ++ for value in ents[0].getValues('attributetypes'): ++ if ensure_bytes('objectCategoryX') in value: ++ log.info("M2: " + str(value)) ++ break ++ assert ensure_bytes('objectCategoryX') in value ++ ++ # Success cleanup ++ os.unlink(m1_schema) ++ shutil.copytree(m1_temp_backup, m1_schema) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/dirsrvtests/tests/suites/roles/__init__.py b/dirsrvtests/tests/suites/roles/__init__.py +new file mode 100644 +index 000000000..1981985fb +--- /dev/null ++++ b/dirsrvtests/tests/suites/roles/__init__.py +@@ -0,0 +1,3 @@ ++""" ++ :Requirement: 389-ds-base: Roles ++""" +diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py +index 3f1b7568c..47a531794 100644 +--- a/dirsrvtests/tests/suites/roles/basic_test.py ++++ b/dirsrvtests/tests/suites/roles/basic_test.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -36,18 +36,19 @@ FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) + + + def test_filterrole(topo): +- ''' +- :id: 8ada4064-786b-11e8-8634-8c16451d917b +- :setup: server +- :steps: +- 1. 
Add test entry +- 2. Add ACI +- 3. Search nsconsole role +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed +- ''' ++ """Test Filter Role ++ ++ :id: 8ada4064-786b-11e8-8634-8c16451d917b ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. Search nsconsole role ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should succeed ++ """ + Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) + properties = { + 'ou': 'eng', +@@ -137,18 +138,19 @@ def test_filterrole(topo): + + + def test_managedrole(topo): +- ''' +- :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b +- :setup: server +- :steps: +- 1. Add test entry +- 2. Add ACI +- 3. Search managed role entries +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed +- ''' ++ """Test Managed Role ++ ++ :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b ++ :setup: server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. Search managed role entries ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. Operation should succeed ++ """ + # Create Managed role entry + roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = roles.create(properties={"cn": 'ROLE1'}) +@@ -184,8 +186,12 @@ def test_managedrole(topo): + + # Set an aci that will deny ROLE1 manage role + Domain(topo.standalone, DEFAULT_SUFFIX).\ +- add('aci', '(targetattr=*)(version 3.0; aci "role aci";' ++ add('aci', '(targetattr="*")(version 3.0; aci "role aci";' + ' deny(all) roledn="ldap:///{}";)'.format(role.dn),) ++ # Add self user modification and anonymous aci ++ ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" ++ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ++ suffix.add('aci', ANON_ACI) + + # Crate a connection with cn=Fail which is member of ROLE1 + conn = UserAccount(topo.standalone, "uid=Fail,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) +@@ -232,17 +238,18 @@ def _final(request, topo): + + + def test_nestedrole(topo, _final): +- """ +- :id: 867b40c0-7fcf-4332-afc7-bd01025b77f2 +- :setup: Standalone server +- :steps: +- 1. Add test entry +- 2. Add ACI +- 3. Search managed role entries +- :expectedresults: +- 1. Entry should be added +- 2. Operation should succeed +- 3. Operation should succeed ++ """Test Nested Role ++ ++ :id: 867b40c0-7fcf-4332-afc7-bd01025b77f2 ++ :setup: Standalone server ++ :steps: ++ 1. Add test entry ++ 2. Add ACI ++ 3. Search managed role entries ++ :expectedresults: ++ 1. Entry should be added ++ 2. Operation should succeed ++ 3. 
Operation should succeed + """ + # Create Managed role entry + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) +@@ -271,7 +278,7 @@ def test_nestedrole(topo, _final): + + # Create a ACI with deny access to nested role entry + Domain(topo.standalone, DEFAULT_SUFFIX).\ +- add('aci', f'(targetattr=*)(version 3.0; aci ' ++ add('aci', f'(targetattr="*")(version 3.0; aci ' + f'"role aci"; deny(all) roledn="ldap:///{nested_role.dn}";)') + + # Create connection with 'uid=test_user_1,ou=People,dc=example,dc=com' member of managed_role1 +diff --git a/dirsrvtests/tests/suites/sasl/regression_test.py b/dirsrvtests/tests/suites/sasl/regression_test.py +index 2db76ce98..58ff9a225 100644 +--- a/dirsrvtests/tests/suites/sasl/regression_test.py ++++ b/dirsrvtests/tests/suites/sasl/regression_test.py +@@ -1,15 +1,14 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2016 Red Hat, Inc. ++# Copyright (C) 2020 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). + # See LICENSE for details. + # --- END COPYRIGHT BLOCK --- + # +-import base64 ++ + import os + import pytest +-import subprocess + from lib389.tasks import * + from lib389.utils import * + from lib389.topologies import topology_m2 +@@ -48,7 +47,7 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): + log.info("\n######################### Check PEM files (%s, %s, %s)%s in %s ######################\n" + % (mycacert, myservercert, myserverkey, notexist, confdir)) + global cacert +- cacert = '%s/%s.pem' % (confdir, mycacert) ++ cacert = f"{mycacert}.pem" + if os.path.isfile(cacert): + if notexist == "": + log.info('%s is successfully generated.' % cacert) +@@ -61,7 +60,7 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): + assert False + else: + log.info('%s is correctly not generated.' % cacert) +- servercert = '%s/%s.pem' % (confdir, myservercert) ++ servercert = f"{myservercert}.pem" + if os.path.isfile(servercert): + if notexist == "": + log.info('%s is successfully generated.' % servercert) +@@ -74,7 +73,7 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): + assert False + else: + log.info('%s is correctly not generated.' % servercert) +- serverkey = '%s/%s.pem' % (confdir, myserverkey) ++ serverkey = f"{myserverkey}.pem" + if os.path.isfile(serverkey): + if notexist == "": + log.info('%s is successfully generated.' 
% serverkey) +@@ -91,16 +90,16 @@ def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): + + def relocate_pem_files(topology_m2): + log.info("######################### Relocate PEM files on master1 ######################") +- mycacert = 'MyCA' ++ certdir_prefix = "/dev/shm" ++ mycacert = os.path.join(certdir_prefix, "MyCA") + topology_m2.ms["master1"].encryption.set('CACertExtractFile', mycacert) +- myservercert = 'MyServerCert1' +- myserverkey = 'MyServerKey1' ++ myservercert = os.path.join(certdir_prefix, "MyServerCert1") ++ myserverkey = os.path.join(certdir_prefix, "MyServerKey1") + topology_m2.ms["master1"].rsa.apply_mods([(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert), + (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)]) + log.info("##### restart master1") + topology_m2.ms["master1"].restart() +- m1confdir = topology_m2.ms["master1"].confdir +- check_pems(m1confdir, mycacert, myservercert, myserverkey, "") ++ check_pems(certdir_prefix, mycacert, myservercert, myserverkey, "") + + @pytest.mark.ds47536 + def test_openldap_no_nss_crypto(topology_m2): +diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py b/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py +new file mode 100644 +index 000000000..699d58f79 +--- /dev/null ++++ b/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py +@@ -0,0 +1,163 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2020 William Brown ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import logging ++import ldap ++import time ++from ldap.syncrepl import SyncreplConsumer ++import pytest ++from lib389 import DirSrv ++from lib389.idm.user import nsUserAccounts, UserAccounts ++from lib389.topologies import topology_st as topology ++from lib389.paths import Paths ++from lib389.utils import ds_is_older ++from lib389.plugins import RetroChangelogPlugin, ContentSynchronizationPlugin ++from lib389._constants import * ++ ++log = logging.getLogger(__name__) ++ ++class ISyncRepl(DirSrv, SyncreplConsumer): ++ """ ++ This implements a test harness for checking syncrepl, and allowing us to check various actions or ++ behaviours. During a "run" it stores the results in it's instance, so that they can be inspected ++ later to ensure that syncrepl worked as expected. ++ """ ++ def __init__(self, inst, openldap=False): ++ self.inst = inst ++ self.msgid = None ++ ++ self.last_cookie = None ++ self.next_cookie = None ++ self.cookie = None ++ self.openldap = openldap ++ if self.openldap: ++ # In openldap mode, our initial cookie needs to be a rid. ++ self.cookie = "rid=123" ++ self.delete = [] ++ self.present = [] ++ self.entries = {} ++ ++ super().__init__() ++ ++ def result4(self, *args, **kwargs): ++ return self.inst.result4(*args, **kwargs, escapehatch='i am sure') ++ ++ def search_ext(self, *args, **kwargs): ++ return self.inst.search_ext(*args, **kwargs, escapehatch='i am sure') ++ ++ def syncrepl_search(self, base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, mode='refreshOnly', cookie=None, **search_args): ++ # Wipe the last result set. ++ self.delete = [] ++ self.present = [] ++ self.entries = {} ++ self.next_cookie = None ++ # Start the sync ++ # If cookie is none, will call "get_cookie" we have. 
++ self.msgid = super().syncrepl_search(base, scope, mode, cookie, **search_args) ++ log.debug(f'syncrepl_search -> {self.msgid}') ++ assert self.msgid is not None ++ ++ def syncrepl_complete(self): ++ log.debug(f'syncrepl_complete -> {self.msgid}') ++ assert self.msgid is not None ++ # Loop until the operation is complete. ++ while super().syncrepl_poll(msgid=self.msgid) is True: ++ pass ++ assert self.next_cookie is not None ++ self.last_cookie = self.cookie ++ self.cookie = self.next_cookie ++ ++ def check_cookie(self): ++ assert self.last_cookie != self.cookie ++ ++ def syncrepl_set_cookie(self, cookie): ++ log.debug(f'set_cookie -> {cookie}') ++ if self.openldap: ++ assert self.cookie.startswith("rid=123") ++ self.next_cookie = cookie ++ ++ def syncrepl_get_cookie(self): ++ log.debug('get_cookie -> %s' % self.cookie) ++ if self.openldap: ++ assert self.cookie.startswith("rid=123") ++ return self.cookie ++ ++ def syncrepl_present(self, uuids, refreshDeletes=False): ++ log.debug(f'=====> refdel -> {refreshDeletes} uuids -> {uuids}') ++ if uuids is not None: ++ self.present = self.present + uuids ++ ++ def syncrepl_delete(self, uuids): ++ log.debug(f'delete -> {uuids}') ++ self.delete = uuids ++ ++ def syncrepl_entry(self, dn, attrs, uuid): ++ log.debug(f'entry -> {dn}') ++ self.entries[dn] = (uuid, attrs) ++ ++ def syncrepl_refreshdone(self): ++ log.debug('refreshdone') ++ ++def syncstate_assert(st, sync): ++ # How many entries do we have? ++ r = st.search_ext_s( ++ base=DEFAULT_SUFFIX, ++ scope=ldap.SCOPE_SUBTREE, ++ filterstr='(objectClass=*)', ++ attrsonly=1, ++ escapehatch='i am sure' ++ ) ++ ++ # Initial sync ++ log.debug("*test* initial") ++ sync.syncrepl_search() ++ sync.syncrepl_complete() ++ # check we caught them all ++ assert len(r) == len(sync.entries.keys()) ++ assert len(r) == len(sync.present) ++ assert 0 == len(sync.delete) ++ ++ # Add a new entry ++ ++ account = nsUserAccounts(st, DEFAULT_SUFFIX).create_test_user() ++ # Check ++ log.debug("*test* add") ++ sync.syncrepl_search() ++ sync.syncrepl_complete() ++ sync.check_cookie() ++ assert 1 == len(sync.entries.keys()) ++ assert 1 == len(sync.present) ++ assert 0 == len(sync.delete) ++ ++ # Mod ++ account.replace('description', 'change') ++ # Check ++ log.debug("*test* mod") ++ sync.syncrepl_search() ++ sync.syncrepl_complete() ++ sync.check_cookie() ++ assert 1 == len(sync.entries.keys()) ++ assert 1 == len(sync.present) ++ assert 0 == len(sync.delete) ++ ++ ## Delete ++ account.delete() ++ ++ # Check ++ log.debug("*test* del") ++ sync.syncrepl_search() ++ sync.syncrepl_complete() ++ # In a delete, the cookie isn't updated (?) 
++ sync.check_cookie() ++ log.debug(f'{sync.entries.keys()}') ++ log.debug(f'{sync.present}') ++ log.debug(f'{sync.delete}') ++ assert 0 == len(sync.entries.keys()) ++ assert 0 == len(sync.present) ++ assert 1 == len(sync.delete) ++ +diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py +index 7b35537d5..64b7425a5 100644 +--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py ++++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py +@@ -20,7 +20,7 @@ from lib389.idm.group import Groups + from lib389.topologies import topology_st as topology + from lib389.paths import Paths + from lib389.utils import ds_is_older +-from lib389.plugins import RetroChangelogPlugin, ContentSyncPlugin, AutoMembershipPlugin, MemberOfPlugin, MemberOfSharedConfig, AutoMembershipDefinitions, MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate ++from lib389.plugins import RetroChangelogPlugin, ContentSynchronizationPlugin, AutoMembershipPlugin, MemberOfPlugin, MemberOfSharedConfig, AutoMembershipDefinitions, MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate + from lib389._constants import * + + from . import ISyncRepl, syncstate_assert +@@ -54,7 +54,7 @@ def test_syncrepl_basic(topology): + # Set the default targetid + rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') + # Enable sync repl +- csp = ContentSyncPlugin(st) ++ csp = ContentSynchronizationPlugin(st) + csp.enable() + # Restart DS + st.restart() +@@ -176,7 +176,7 @@ def test_sync_repl_mep(topology, request): + plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') + + # Enable sync plugin +- plugin = ContentSyncPlugin(inst) ++ plugin = ContentSynchronizationPlugin(inst) + plugin.enable() + + # Check the plug-in status +@@ -232,6 +232,8 @@ def test_sync_repl_mep(topology, request): + prev = int(cookie) + sync_repl.join() + log.info('test_sync_repl_map: PASS\n') ++ inst.start() ++ + + def test_sync_repl_cookie(topology, request): + """Test sync_repl cookie are progressing is an increasing order +@@ -240,33 +242,33 @@ def test_sync_repl_cookie(topology, request): + :id: d7fbde25-5702-46ac-b38e-169d7a68e97c + :setup: Standalone Instance + :steps: +- 1.: enable retroCL +- 2.: configure retroCL to log nsuniqueid as targetUniqueId +- 3.: enable content_sync plugin +- 4.: enable automember +- 5.: create (2) groups. Few groups can help to reproduce the concurrent updates problem. +- 6.: configure automember to provision those groups with 'member' +- 7.: enable and configure memberof plugin +- 8.: enable plugin log level +- 9.: restart the server +- 10.: create a thread dedicated to run a sync repl client +- 11.: Create (9) users that will generate nested updates (automember/memberof) +- 12.: stop sync repl client and collect the list of cookie.change_no +- 13.: check that cookies.change_no are in increasing order ++ 1. enable retroCL ++ 2. configure retroCL to log nsuniqueid as targetUniqueId ++ 3. enable content_sync plugin ++ 4. enable automember ++ 5. create (2) groups. Few groups can help to reproduce the concurrent updates problem. ++ 6. configure automember to provision those groups with 'member' ++ 7. enable and configure memberof plugin ++ 8. enable plugin log level ++ 9. restart the server ++ 10. create a thread dedicated to run a sync repl client ++ 11. Create (9) users that will generate nested updates (automember/memberof) ++ 12. stop sync repl client and collect the list of cookie.change_no ++ 13. 
check that cookies.change_no are in increasing order + :expectedresults: +- 1.: succeeds +- 2.: succeeds +- 3.: succeeds +- 4.: succeeds +- 5.: succeeds +- 6.: succeeds +- 7.: succeeds +- 8.: succeeds +- 9.: succeeds +- 10.: succeeds +- 11.: succeeds +- 12.: succeeds +- 13.: succeeds ++ 1. succeeds ++ 2. succeeds ++ 3. succeeds ++ 4. succeeds ++ 5. succeeds ++ 6. succeeds ++ 7. succeeds ++ 8. succeeds ++ 9. succeeds ++ 10. succeeds ++ 11. succeeds ++ 12. succeeds ++ 13. succeeds + """ + inst = topology[0] + +@@ -277,7 +279,7 @@ def test_sync_repl_cookie(topology, request): + plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') + + # Enable sync plugin +- plugin = ContentSyncPlugin(inst) ++ plugin = ContentSynchronizationPlugin(inst) + plugin.enable() + + # Enable automember +@@ -409,7 +411,7 @@ def test_sync_repl_cookie_add_del(topology, request): + plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') + + # Enable sync plugin +- plugin = ContentSyncPlugin(inst) ++ plugin = ContentSynchronizationPlugin(inst) + plugin.enable() + + # Enable automember +@@ -541,7 +543,7 @@ def test_sync_repl_cookie_with_failure(topology, request): + plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') + + # Enable sync plugin +- plugin = ContentSyncPlugin(inst) ++ plugin = ContentSynchronizationPlugin(inst) + plugin.enable() + + # Enable automember +diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py +index 646cd97ba..2e1637a21 100644 +--- a/dirsrvtests/tests/suites/vlv/regression_test.py ++++ b/dirsrvtests/tests/suites/vlv/regression_test.py +@@ -84,8 +84,8 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2): + MappingTrees(M2).list()[0].delete() + Backends(M2).list()[0].delete() + # Recreate the backend and the VLV index on Master 2. +- M2.mappingtree.create(DEFAULT_SUFFIX, "userRoot") + M2.backend.create(DEFAULT_SUFFIX, {BACKEND_NAME: "userRoot"}) ++ M2.mappingtree.create(DEFAULT_SUFFIX, "userRoot") + # Recreating vlvSrchDn and vlvIndexDn on Master 2. + vlv_searches.create( + basedn="cn=userRoot,cn=ldbm database,cn=plugins,cn=config", +-- +2.26.2 + diff --git a/SOURCES/0033-Issue-5442-Search-results-are-different-between-RHDS.patch b/SOURCES/0033-Issue-5442-Search-results-are-different-between-RHDS.patch new file mode 100644 index 0000000..362f5f3 --- /dev/null +++ b/SOURCES/0033-Issue-5442-Search-results-are-different-between-RHDS.patch @@ -0,0 +1,782 @@ +From 788d7c69a446d1ae324b2c58daaa5d4fd5528748 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 20 Jan 2021 16:42:15 -0500 +Subject: [PATCH 1/3] Issue 5442 - Search results are different between RHDS10 + and RHDS11 + +Bug Description: In 1.4.x we introduced a change that was overly strict about + how a search on a non-existent subtree returned its error code. + It was changed from returning an error 32 to an error 0 with + zero entries returned. + +Fix Description: When finding the entry and processing acl's make sure to + gather the aci's that match the resource even if the resource + does not exist. This requires some extra checks when processing + the target attribute. + +relates: https://github.com/389ds/389-ds-base/issues/4542 + +Reviewed by: firstyear, elkris, and tbordaz (Thanks!) 
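
As a side note on the behaviour this patch restores, a minimal standalone sketch is given below (plain python-ldap, with an assumed local URI, bind DN and password that are illustrative only and not taken from the patch): an authenticated search whose base entry does not exist should come back with err=32 (NO_SUCH_OBJECT) rather than a silent empty result.

    # Hedged sketch: assumed URI and credentials; demonstrates the expected
    # post-fix behaviour that a missing search base raises NO_SUCH_OBJECT
    # (LDAP error 32) instead of returning success with zero entries.
    import ldap

    URI = "ldap://localhost:389"                        # assumption
    BIND_DN = "uid=test,ou=people,dc=example,dc=com"    # assumption
    BIND_PW = "password"                                # assumption

    conn = ldap.initialize(URI)
    conn.simple_bind_s(BIND_DN, BIND_PW)
    try:
        conn.search_s("ou=does_not_exist,dc=example,dc=com",
                      ldap.SCOPE_SUBTREE, "(objectClass=*)")
        print("unexpected: search on a missing subtree returned success")
    except ldap.NO_SUCH_OBJECT:
        print("got err=32 (NO_SUCH_OBJECT) as expected")
    finally:
        conn.unbind_s()
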
+ +Apply Thierry's changes + +round 2 + +Apply more suggestions from Thierry +--- + dirsrvtests/tests/suites/acl/misc_test.py | 108 +++++++- + ldap/servers/plugins/acl/acl.c | 296 ++++++++++------------ + ldap/servers/slapd/back-ldbm/findentry.c | 6 +- + src/lib389/lib389/_mapped_object.py | 4 +- + 4 files changed, 239 insertions(+), 175 deletions(-) + +diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py +index 5f0e3eb72..c640e60ad 100644 +--- a/dirsrvtests/tests/suites/acl/misc_test.py ++++ b/dirsrvtests/tests/suites/acl/misc_test.py +@@ -12,7 +12,7 @@ import ldap + import os + import pytest + +-from lib389._constants import DEFAULT_SUFFIX, PW_DM ++from lib389._constants import DEFAULT_SUFFIX, PW_DM, DN_DM + from lib389.idm.user import UserAccount, UserAccounts + from lib389._mapped_object import DSLdapObject + from lib389.idm.account import Accounts, Anonymous +@@ -408,14 +408,112 @@ def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): + user = uas.create_test_user(uid=i, gid=i) + user.set('userPassword', PW_DM) + +- for i in range(len(uas.list())): +- uas.list()[i].bind(PW_DM) ++ users = uas.list() ++ for user in users: ++ user.bind(PW_DM) + + ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220') + topo.standalone.restart() + +- for i in range(len(uas.list())): +- uas.list()[i].bind(PW_DM) ++ users = uas.list() ++ for user in users: ++ user.bind(PW_DM) ++ ++ ++def test_info_disclosure(request, topo): ++ """Test that a search returns 32 when base entry does not exist ++ ++ :id: f6dec4c2-65a3-41e4-a4c0-146196863333 ++ :setup: Standalone Instance ++ :steps: ++ 1. Add aci ++ 2. Add test user ++ 3. Bind as user and search for non-existent entry ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. 
Error 32 is returned ++ """ ++ ++ ACI_TARGET = "(targetattr = \"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ++ ACI_ALLOW = "(version 3.0; acl \"Read/Search permission for all users\"; allow (read,search)" ++ ACI_SUBJECT = "(userdn=\"ldap:///all\");)" ++ ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT ++ ++ # Get current ACi's so we can restore them when we are done ++ suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ++ preserved_acis = suffix.get_attr_vals_utf8('aci') ++ ++ def finofaci(): ++ domain = Domain(topo.standalone, DEFAULT_SUFFIX) ++ try: ++ domain.remove_all('aci') ++ domain.replace_values('aci', preserved_acis) ++ except: ++ pass ++ request.addfinalizer(finofaci) ++ ++ # Remove aci's ++ suffix.remove_all('aci') ++ ++ # Add test user ++ USER_DN = "uid=test,ou=people," + DEFAULT_SUFFIX ++ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) ++ users.create(properties={ ++ 'uid': 'test', ++ 'cn': 'test', ++ 'sn': 'test', ++ 'uidNumber': '1000', ++ 'gidNumber': '2000', ++ 'homeDirectory': '/home/test', ++ 'userPassword': PW_DM ++ }) ++ ++ # bind as user ++ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) ++ ++ # Search fo existing base DN ++ test = Domain(conn, DEFAULT_SUFFIX) ++ try: ++ test.get_attr_vals_utf8_l('dc') ++ assert False ++ except IndexError: ++ pass ++ ++ # Search for a non existent bases ++ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) ++ try: ++ subtree.get_attr_vals_utf8_l('objectclass') ++ except IndexError: ++ pass ++ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) ++ try: ++ subtree.get_attr_vals_utf8_l('objectclass') ++ except IndexError: ++ pass ++ # Try ONE level search instead of BASE ++ try: ++ Accounts(conn, "ou=does_not_exist," + DEFAULT_SUFFIX).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL) ++ except IndexError: ++ pass ++ ++ # add aci ++ suffix.add('aci', ACI) ++ ++ # Search for a non existent entry which should raise an exception ++ with pytest.raises(ldap.NO_SUCH_OBJECT): ++ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) ++ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) ++ subtree.get_attr_vals_utf8_l('objectclass') ++ with pytest.raises(ldap.NO_SUCH_OBJECT): ++ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) ++ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) ++ subtree.get_attr_vals_utf8_l('objectclass') ++ with pytest.raises(ldap.NO_SUCH_OBJECT): ++ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) ++ DN = "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX ++ Accounts(conn, DN).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL, strict=True) ++ + + if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) +diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c +index 41a909a18..4e811f73a 100644 +--- a/ldap/servers/plugins/acl/acl.c ++++ b/ldap/servers/plugins/acl/acl.c +@@ -2111,10 +2111,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + aci_right = aci->aci_access; + res_right = aclpb->aclpb_access; + if (!(aci_right & res_right)) { +- /* If we are looking for read/search and the acl has read/search +- ** then go further because if targets match we may keep that +- ** acl in the entry cache list. +- */ ++ /* ++ * If we are looking for read/search and the acl has read/search ++ * then go further because if targets match we may keep that ++ * acl in the entry cache list. 
++ */ + if (!((res_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) && + (aci_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)))) { + matches = ACL_FALSE; +@@ -2122,30 +2123,29 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } + } + +- +- /* first Let's see if the entry is under the subtree where the +- ** ACL resides. We can't let somebody affect a target beyond the +- ** scope of where the ACL resides +- ** Example: ACL is located in "ou=engineering, o=ace industry, c=us +- ** but if the target is "o=ace industry, c=us", then we are in trouble. +- ** +- ** If the aci is in the rootdse and the entry is not, then we do not +- ** match--ie. acis in the rootdse do NOT apply below...for the moment. +- ** +- */ ++ /* ++ * First Let's see if the entry is under the subtree where the ++ * ACL resides. We can't let somebody affect a target beyond the ++ * scope of where the ACL resides ++ * Example: ACL is located in "ou=engineering, o=ace industry, c=us ++ * but if the target is "o=ace industry, c=us", then we are in trouble. ++ * ++ * If the aci is in the rootdse and the entry is not, then we do not ++ * match--ie. acis in the rootdse do NOT apply below...for the moment. ++ */ + res_ndn = slapi_sdn_get_ndn(aclpb->aclpb_curr_entry_sdn); + aci_ndn = slapi_sdn_get_ndn(aci->aci_sdn); +- if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) { +- +- /* cant' poke around */ ++ if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || ++ (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) ++ { ++ /* can't poke around */ + matches = ACL_FALSE; + goto acl__resource_match_aci_EXIT; + } + + /* +- ** We have a single ACI which we need to find if it applies to +- ** the resource or not. +- */ ++ * We have a single ACI which we need to find if it applies to the resource or not. ++ */ + if ((aci->aci_type & ACI_TARGET_DN) && (aclpb->aclpb_curr_entry_sdn)) { + char *avaType; + struct berval *avaValue; +@@ -2173,25 +2173,23 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + char *avaType; + struct berval *avaValue; + char logbuf[1024]; +- +- /* We are evaluating the moddn permission. +- * The aci contains target_to and target_from +- * +- * target_to filter must be checked against the resource ndn that was stored in +- * aclpb->aclpb_curr_entry_sdn +- * +- * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn +- * (sdn was stored in the pblock) +- */ ++ /* ++ * We are evaluating the moddn permission. ++ * The aci contains target_to and target_from ++ * ++ * target_to filter must be checked against the resource ndn that was stored in ++ * aclpb->aclpb_curr_entry_sdn ++ * ++ * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn ++ * (sdn was stored in the pblock) ++ */ + if (aci->target_to) { + f = aci->target_to; + dn_matched = ACL_TRUE; + + /* Now check if the filter is a simple or substring filter */ + if (aci->aci_type & ACI_TARGET_MODDN_TO_PATTERN) { +- /* This is a filter with substring +- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com +- */ ++ /* This is a filter with substring e.g. 
ldap:///uid=*,cn=accounts,dc=example,dc=com */ + slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to substring: %s\n", + slapi_filter_to_string(f, logbuf, sizeof(logbuf))); + if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffix */)) != ACL_TRUE) { +@@ -2204,9 +2202,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } + } + } else { +- /* This is a filter without substring +- * e.g. ldap:///cn=accounts,dc=example,dc=com +- */ ++ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */ + slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to: %s\n", + slapi_filter_to_string(f, logbuf, sizeof(logbuf))); + slapi_filter_get_ava(f, &avaType, &avaValue); +@@ -2230,8 +2226,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + /* Now check if the filter is a simple or substring filter */ + if (aci->aci_type & ACI_TARGET_MODDN_FROM_PATTERN) { + /* This is a filter with substring +- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com +- */ ++ * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com ++ */ + slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from substring: %s\n", + slapi_filter_to_string(f, logbuf, sizeof(logbuf))); + if ((rv = acl_match_substring(f, (char *)slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), 0 /* match suffix */)) != ACL_TRUE) { +@@ -2243,11 +2239,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + goto acl__resource_match_aci_EXIT; + } + } +- + } else { +- /* This is a filter without substring +- * e.g. ldap:///cn=accounts,dc=example,dc=com +- */ ++ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */ + slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from: %s\n", + slapi_filter_to_string(f, logbuf, sizeof(logbuf))); + if (!slapi_dn_issuffix(slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), avaValue->bv_val)) { +@@ -2269,10 +2262,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } + + if (aci->aci_type & ACI_TARGET_PATTERN) { +- + f = aci->target; + dn_matched = ACL_TRUE; +- + if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffux */)) != ACL_TRUE) { + dn_matched = ACL_FALSE; + if (rv == ACL_ERR) { +@@ -2296,7 +2287,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + + /* + * Is it a (target="ldap://cn=*,($dn),o=sun.com") kind of thing. +- */ ++ */ + if (aci->aci_type & ACI_TARGET_MACRO_DN) { + /* + * See if the ($dn) component matches the string and +@@ -2306,8 +2297,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + * entry is the same one don't recalculate it-- + * this flag only works for search right now, could + * also optimise for mods by making it work for mods. +- */ +- ++ */ + if ((aclpb->aclpb_res_type & ACLPB_NEW_ENTRY) == 0) { + /* + * Here same entry so just look up the matched value, +@@ -2356,8 +2346,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + * If there is already an entry for this aci in this + * aclpb then remove it--it's an old value for a + * different entry. 
+- */ +- ++ */ + acl_ht_add_and_freeOld(aclpb->aclpb_macro_ht, + (PLHashNumber)aci->aci_index, + matched_val); +@@ -2381,30 +2370,27 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } + + /* +- ** Here, if there's a targetfilter field, see if it matches. +- ** +- ** The commented out code below was an erroneous attempt to skip +- ** this test. It is wrong because: 1. you need to store +- ** whether the last test matched or not (you cannot just assume it did) +- ** and 2. It may not be the same aci, so the previous matched +- ** value is a function of the aci. +- ** May be interesting to build such a cache...but no evidence for +- ** for that right now. See Bug 383424. +- ** +- ** +- ** && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) || +- ** (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) +- */ ++ * Here, if there's a targetfilter field, see if it matches. ++ * ++ * The commented out code below was an erroneous attempt to skip ++ * this test. It is wrong because: 1. you need to store ++ * whether the last test matched or not (you cannot just assume it did) ++ * and 2. It may not be the same aci, so the previous matched ++ * value is a function of the aci. ++ * May be interesting to build such a cache...but no evidence for ++ * for that right now. See Bug 383424. ++ * ++ * ++ * && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) || ++ * (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) ++ */ + if (aci->aci_type & ACI_TARGET_FILTER) { + int filter_matched = ACL_TRUE; +- + /* + * Check for macros. + * For targetfilter we need to fake the lasinfo structure--it's + * created "naturally" for subjects but not targets. +- */ +- +- ++ */ + if (aci->aci_type & ACI_TARGET_FILTER_MACRO_DN) { + + lasInfo *lasinfo = NULL; +@@ -2419,11 +2405,9 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + ACL_EVAL_TARGET_FILTER); + slapi_ch_free((void **)&lasinfo); + } else { +- +- + if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry, + aci->targetFilter, +- 0 /*don't do acess chk*/) != 0) { ++ 0 /*don't do access check*/) != 0) { + filter_matched = ACL_FALSE; + } + } +@@ -2450,7 +2434,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + * Check to see if we need to evaluate any targetattrfilters. + * They look as follows: + * (targetattrfilters="add=sn:(sn=rob) && gn:(gn!=byrne), +- * del=sn:(sn=rob) && gn:(gn=byrne)") ++ * del=sn:(sn=rob) && gn:(gn=byrne)") + * + * For ADD/DELETE: + * If theres's a targetattrfilter then each add/del filter +@@ -2458,29 +2442,25 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + * by each value of the attribute in the entry. + * + * For MODIFY: +- * If there's a targetattrfilter then the add/del filter ++ * If there's a targetattrfilter then the add/del filter + * must be satisfied by the attribute to be added/deleted. + * (MODIFY acl is evaluated one value at a time). 
+ * + * +- */ +- ++ */ + if (((aclpb->aclpb_access & SLAPI_ACL_ADD) && + (aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) || + ((aclpb->aclpb_access & SLAPI_ACL_DELETE) && +- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) { +- ++ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) ++ { + Targetattrfilter **attrFilterArray = NULL; +- + Targetattrfilter *attrFilter = NULL; +- + Slapi_Attr *attr_ptr = NULL; + Slapi_Value *sval; + const struct berval *attrVal; + int k; + int done; + +- + if ((aclpb->aclpb_access & SLAPI_ACL_ADD) && + (aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) { + +@@ -2497,28 +2477,20 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + + while (attrFilterArray && attrFilterArray[num_attrs] && attr_matched) { + attrFilter = attrFilterArray[num_attrs]; +- + /* +- * If this filter applies to an attribute in the entry, +- * apply it to the entry. +- * Otherwise just ignore it. +- * +- */ +- +- if (slapi_entry_attr_find(aclpb->aclpb_curr_entry, +- attrFilter->attr_str, +- &attr_ptr) == 0) { +- ++ * If this filter applies to an attribute in the entry, ++ * apply it to the entry. ++ * Otherwise just ignore it. ++ * ++ */ ++ if (slapi_entry_attr_find(aclpb->aclpb_curr_entry, attrFilter->attr_str, &attr_ptr) == 0) { + /* +- * This is an applicable filter. +- * The filter is to be appplied to the entry being added +- * or deleted. +- * The filter needs to be satisfied by _each_ occurence +- * of the attribute in the entry--otherwise you +- * could satisfy the filter and then put loads of other +- * values in on the back of it. +- */ +- ++ * This is an applicable filter. ++ * The filter is to be applied to the entry being added or deleted. ++ * The filter needs to be satisfied by _each_ occurrence of the ++ * attribute in the entry--otherwise you could satisfy the filter ++ * and then put loads of other values in on the back of it. ++ */ + sval = NULL; + attrVal = NULL; + k = slapi_attr_first_value(attr_ptr, &sval); +@@ -2528,12 +2500,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + + if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry, + attrFilter->attr_str, +- (struct berval *)attrVal) == LDAP_SUCCESS) { +- ++ (struct berval *)attrVal) == LDAP_SUCCESS) ++ { + attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry, + attrFilter->filter, +- 1 /* Do filter sense evaluation below */ +- ); ++ 1 /* Do filter sense evaluation below */); + done = !attr_matched; + slapi_entry_free(aclpb->aclpb_filter_test_entry); + } +@@ -2542,19 +2513,19 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } /* while */ + + /* +- * Here, we applied an applicable filter to the entry. +- * So if attr_matched is ACL_TRUE then every value +- * of the attribute in the entry satisfied the filter. +- * Otherwise, attr_matched is ACL_FALSE and not every +- * value satisfied the filter, so we will teminate the +- * scan of the filter list. +- */ ++ * Here, we applied an applicable filter to the entry. ++ * So if attr_matched is ACL_TRUE then every value ++ * of the attribute in the entry satisfied the filter. ++ * Otherwise, attr_matched is ACL_FALSE and not every ++ * value satisfied the filter, so we will terminate the ++ * scan of the filter list. ++ */ + } + + num_attrs++; + } /* while */ + +-/* ++ /* + * Here, we've applied all the applicable filters to the entry. + * Each one must have been satisfied by all the values of the attribute. + * The result of this is stored in attr_matched. 
+@@ -2585,7 +2556,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } else if (((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_ADD) && + (aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) || + ((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_DEL) && +- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) { ++ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) ++ { + /* + * Here, it's a modify add/del and we have attr filters. + * So, we need to scan the add/del filter list to find the filter +@@ -2629,11 +2601,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + * Otherwise, ignore the targetattrfilters. + */ + if (found) { +- + if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry, + aclpb->aclpb_curr_attrEval->attrEval_name, +- aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS) { +- ++ aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS) ++ { + attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry, + attrFilter->filter, + 1 /* Do filter sense evaluation below */ +@@ -2651,20 +2622,21 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + * Here this attribute appeared and was matched in a + * targetattrfilters list, so record this fact so we do + * not have to scan the targetattr list for the attribute. +- */ ++ */ + + attr_matched_in_targetattrfilters = 1; + } + } /* targetvaluefilters */ + + +- /* There are 3 cases by which acis are selected. +- ** 1) By scanning the whole list and picking based on the resource. +- ** 2) By picking a subset of the list which will be used for the whole +- ** acl evaluation. +- ** 3) A finer granularity, i.e, a selected list of acls which will be +- ** used for only that entry's evaluation. +- */ ++ /* ++ * There are 3 cases by which acis are selected. ++ * 1) By scanning the whole list and picking based on the resource. ++ * 2) By picking a subset of the list which will be used for the whole ++ * acl evaluation. ++ * 3) A finer granularity, i.e, a selected list of acls which will be ++ * used for only that entry's evaluation. ++ */ + if (!(skip_attrEval) && (aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_ENTRY_LIST) && + (res_right & SLAPI_ACL_SEARCH) && + ((aci->aci_access & SLAPI_ACL_READ) || (aci->aci_access & SLAPI_ACL_SEARCH))) { +@@ -2680,7 +2652,6 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } + } + +- + /* If we are suppose to skip attr eval, then let's skip it */ + if ((aclpb->aclpb_access & SLAPI_ACL_SEARCH) && (!skip_attrEval) && + (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) { +@@ -2697,9 +2668,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + goto acl__resource_match_aci_EXIT; + } + +- /* We need to check again because we don't want to select this handle +- ** if the right doesn't match for now. +- */ ++ /* ++ * We need to check again because we don't want to select this handle ++ * if the right doesn't match for now. ++ */ + if (!(aci_right & res_right)) { + matches = ACL_FALSE; + goto acl__resource_match_aci_EXIT; +@@ -2718,20 +2690,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + * rbyrneXXX if we had a proper permission for modrdn eg SLAPI_ACL_MODRDN + * then we would not need this crappy way of telling it was a MODRDN + * request ie. SLAPI_ACL_WRITE && !(c_attrEval). +- */ +- ++ */ + c_attrEval = aclpb->aclpb_curr_attrEval; + + /* + * If we've already matched on targattrfilter then do not + * bother to look at the attrlist. 
+- */ +- ++ */ + if (!attr_matched_in_targetattrfilters) { +- + /* match target attr */ +- if ((c_attrEval) && +- (aci->aci_type & ACI_TARGET_ATTR)) { ++ if ((c_attrEval) && (aci->aci_type & ACI_TARGET_ATTR)) { + /* there is a target ATTR */ + Targetattr **attrArray = aci->targetAttr; + Targetattr *attr = NULL; +@@ -2773,46 +2741,43 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + matches = (attr_matched ? ACL_TRUE : ACL_FALSE); + } + +- + aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED; + /* figure out how it matched, i.e star matched */ +- if (matches && star_matched && num_attrs == 1 && +- !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE)) ++ if (matches && star_matched && num_attrs == 1 && !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE)) { + aclpb->aclpb_state |= ACLPB_ATTR_STAR_MATCHED; +- else { ++ } else { + /* we are here means that there is a specific +- ** attr in the rule for this resource. +- ** We need to avoid this case +- ** Rule 1: (targetattr = "uid") +- ** Rule 2: (targetattr = "*") +- ** we cannot use STAR optimization +- */ ++ * attr in the rule for this resource. ++ * We need to avoid this case ++ * Rule 1: (targetattr = "uid") ++ * Rule 2: (targetattr = "*") ++ * we cannot use STAR optimization ++ */ + aclpb->aclpb_state |= ACLPB_FOUND_ATTR_RULE; + aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED; + } +- } else if ((c_attrEval) || +- (aci->aci_type & ACI_TARGET_ATTR)) { ++ } else if ((c_attrEval) || (aci->aci_type & ACI_TARGET_ATTR)) { + if ((aci_right & ACL_RIGHTS_TARGETATTR_NOT_NEEDED) && + (aclpb->aclpb_access & ACL_RIGHTS_TARGETATTR_NOT_NEEDED)) { + /* +- ** Targetattr rule doesn't make any sense +- ** in this case. So select this rule +- ** default: matches = ACL_TRUE; +- */ ++ * Targetattr rule doesn't make any sense ++ * in this case. So select this rule ++ * default: matches = ACL_TRUE; ++ */ + ; +- } else if (aci_right & SLAPI_ACL_WRITE && ++ } else if ((aci_right & SLAPI_ACL_WRITE) && + (aci->aci_type & ACI_TARGET_ATTR) && + !(c_attrEval) && + (aci->aci_type & ACI_HAS_ALLOW_RULE)) { + /* We need to handle modrdn operation. Modrdn doesn't +- ** change any attrs but changes the RDN and so (attr=NULL). +- ** Here we found an acl which has a targetattr but +- ** the resource doesn't need one. In that case, we should +- ** consider this acl. +- ** the opposite is true if it is a deny rule, only a deny without +- ** any targetattr should deny modrdn +- ** default: matches = ACL_TRUE; +- */ ++ * change any attrs but changes the RDN and so (attr=NULL). ++ * Here we found an acl which has a targetattr but ++ * the resource doesn't need one. In that case, we should ++ * consider this acl. ++ * the opposite is true if it is a deny rule, only a deny without ++ * any targetattr should deny modrdn ++ * default: matches = ACL_TRUE; ++ */ + ; + } else { + matches = ACL_FALSE; +@@ -2821,16 +2786,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a + } /* !attr_matched_in_targetattrfilters */ + + /* +- ** Here we are testing if we find a entry test rule (which should +- ** be rare). In that case, just remember it. An entry test rule +- ** doesn't have "(targetattr)". +- */ ++ * Here we are testing if we find a entry test rule (which should ++ * be rare). In that case, just remember it. An entry test rule ++ * doesn't have "(targetattr)". 
++ */ + if ((aclpb->aclpb_state & ACLPB_EVALUATING_FIRST_ATTR) && + (!(aci->aci_type & ACI_TARGET_ATTR))) { + aclpb->aclpb_state |= ACLPB_FOUND_A_ENTRY_TEST_RULE; + } + +-/* ++ /* + * Generic exit point for this routine: + * matches is ACL_TRUE if the aci matches the target of the resource, + * ACL_FALSE othrewise. +@@ -2853,6 +2818,7 @@ acl__resource_match_aci_EXIT: + + return (matches); + } ++ + /* Macro to determine if the cached result is valid or not. */ + #define ACL_CACHED_RESULT_VALID(result) \ + (((result & ACLPB_CACHE_READ_RES_ALLOW) && \ +diff --git a/ldap/servers/slapd/back-ldbm/findentry.c b/ldap/servers/slapd/back-ldbm/findentry.c +index 6e53a0aea..bff751c88 100644 +--- a/ldap/servers/slapd/back-ldbm/findentry.c ++++ b/ldap/servers/slapd/back-ldbm/findentry.c +@@ -93,7 +93,6 @@ find_entry_internal_dn( + size_t tries = 0; + int isroot = 0; + int op_type; +- char *errbuf = NULL; + + /* get the managedsait ldap message control */ + slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait); +@@ -207,8 +206,8 @@ find_entry_internal_dn( + break; + } + if (acl_type > 0) { +- err = plugin_call_acl_plugin(pb, me->ep_entry, NULL, NULL, acl_type, +- ACLPLUGIN_ACCESS_DEFAULT, &errbuf); ++ char *dummy_attr = "1.1"; ++ err = slapi_access_allowed(pb, me->ep_entry, dummy_attr, NULL, acl_type); + } + if (((acl_type > 0) && err) || (op_type == SLAPI_OPERATION_BIND)) { + /* +@@ -237,7 +236,6 @@ find_entry_internal_dn( + CACHE_RETURN(&inst->inst_cache, &me); + } + +- slapi_ch_free_string(&errbuf); + slapi_log_err(SLAPI_LOG_TRACE, "find_entry_internal_dn", "<= Not found (%s)\n", + slapi_sdn_get_dn(sdn)); + return (NULL); +diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py +index c60837601..ca6ea6ef8 100644 +--- a/src/lib389/lib389/_mapped_object.py ++++ b/src/lib389/lib389/_mapped_object.py +@@ -1190,7 +1190,7 @@ class DSLdapObjects(DSLogging, DSLints): + # Now actually commit the creation req + return co.ensure_state(rdn, properties, self._basedn) + +- def filter(self, search, scope=None): ++ def filter(self, search, scope=None, strict=False): + # This will yield and & filter for objectClass with as many terms as needed. + if search: + search_filter = _gen_and([self._get_objectclass_filter(), search]) +@@ -1211,5 +1211,7 @@ class DSLdapObjects(DSLogging, DSLints): + insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results] + except ldap.NO_SUCH_OBJECT: + # There are no objects to select from, se we return an empty array ++ if strict: ++ raise ldap.NO_SUCH_OBJECT + insts = [] + return insts +-- +2.26.2 + diff --git a/SOURCES/0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch b/SOURCES/0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch new file mode 100644 index 0000000..3e12223 --- /dev/null +++ b/SOURCES/0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch @@ -0,0 +1,452 @@ +From 5bca57b52069508a55b36fafe3729b7d1243743b Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Wed, 27 Jan 2021 11:58:38 +0100 +Subject: [PATCH 2/3] Issue 4526 - sync_repl: when completing an operation in + the pending list, it can select the wrong operation (#4553) + +Bug description: + When an operation complete, it was retrieved in the pending list with + the address of the Operation structure. In case of POST OP nested operations + the same address can be reused. So when completing an operation there could be + a confusion which operation actually completed. 
+ A second problem is that if an update its DB_DEADLOCK, the BETXN_PREOP can + be called several times. During retry, the operation is already in the pending + list. + +Fix description: + The fix defines a new operation extension (sync_persist_extension_type). + This operation extension contains an index (idx_pl) of the op_pl in the + the pending list. + + And additional safety fix is to dump the pending list in case it becomes large (>10). + The pending list is dumped with SLAPI_LOG_PLUGIN. + + When there is a retry (operation extension exists) the call to sync_update_persist_betxn_pre_op + becomes a NOOP: the operation is not added again in the pending list. + +relates: https://github.com/389ds/389-ds-base/issues/4526 + +Reviewed by: William Brown (Thanks !!) +--- + ldap/servers/plugins/sync/sync.h | 9 ++ + ldap/servers/plugins/sync/sync_init.c | 64 +++++++- + ldap/servers/plugins/sync/sync_persist.c | 194 ++++++++++++++++------- + 3 files changed, 208 insertions(+), 59 deletions(-) + +diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h +index 7241fddbf..2fdf24476 100644 +--- a/ldap/servers/plugins/sync/sync.h ++++ b/ldap/servers/plugins/sync/sync.h +@@ -82,6 +82,12 @@ typedef enum _pl_flags { + OPERATION_PL_IGNORED = 5 + } pl_flags_t; + ++typedef struct op_ext_ident ++{ ++ uint32_t idx_pl; /* To uniquely identify an operation in PL, the operation extension ++ * contains the index of that operation in the pending list ++ */ ++} op_ext_ident_t; + /* Pending list operations. + * it contains a list ('next') of nested operations. The + * order the same order that the server applied the operation +@@ -90,6 +96,7 @@ typedef enum _pl_flags { + typedef struct OPERATION_PL_CTX + { + Operation *op; /* Pending operation, should not be freed as it belongs to the pblock */ ++ uint32_t idx_pl; /* index of the operation in the pending list */ + pl_flags_t flags; /* operation is completed (set to TRUE in POST) */ + Slapi_Entry *entry; /* entry to be store in the enqueued node. 1st arg sync_queue_change */ + Slapi_Entry *eprev; /* pre-entry to be stored in the enqueued node. 
2nd arg sync_queue_change */ +@@ -99,6 +106,8 @@ typedef struct OPERATION_PL_CTX + + OPERATION_PL_CTX_T * get_thread_primary_op(void); + void set_thread_primary_op(OPERATION_PL_CTX_T *op); ++const op_ext_ident_t * sync_persist_get_operation_extension(Slapi_PBlock *pb); ++void sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident); + + int sync_register_operation_extension(void); + int sync_unregister_operation_entension(void); +diff --git a/ldap/servers/plugins/sync/sync_init.c b/ldap/servers/plugins/sync/sync_init.c +index 74af14512..9e6a12000 100644 +--- a/ldap/servers/plugins/sync/sync_init.c ++++ b/ldap/servers/plugins/sync/sync_init.c +@@ -16,6 +16,7 @@ static int sync_preop_init(Slapi_PBlock *pb); + static int sync_postop_init(Slapi_PBlock *pb); + static int sync_be_postop_init(Slapi_PBlock *pb); + static int sync_betxn_preop_init(Slapi_PBlock *pb); ++static int sync_persist_register_operation_extension(void); + + static PRUintn thread_primary_op; + +@@ -43,7 +44,8 @@ sync_init(Slapi_PBlock *pb) + slapi_pblock_set(pb, SLAPI_PLUGIN_CLOSE_FN, + (void *)sync_close) != 0 || + slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, +- (void *)&pdesc) != 0) { ++ (void *)&pdesc) != 0 || ++ sync_persist_register_operation_extension()) { + slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, + "sync_init - Failed to register plugin\n"); + rc = 1; +@@ -242,4 +244,64 @@ set_thread_primary_op(OPERATION_PL_CTX_T *op) + PR_SetThreadPrivate(thread_primary_op, (void *) head); + } + head->next = op; ++} ++ ++/* The following definitions are used for the operation pending list ++ * (used by sync_repl). To retrieve a specific operation in the pending ++ * list, the operation extension contains the index of the operation in ++ * the pending list ++ */ ++static int sync_persist_extension_type; /* initialized in sync_persist_register_operation_extension */ ++static int sync_persist_extension_handle; /* initialized in sync_persist_register_operation_extension */ ++ ++const op_ext_ident_t * ++sync_persist_get_operation_extension(Slapi_PBlock *pb) ++{ ++ Slapi_Operation *op; ++ op_ext_ident_t *ident; ++ ++ slapi_pblock_get(pb, SLAPI_OPERATION, &op); ++ ident = slapi_get_object_extension(sync_persist_extension_type, op, ++ sync_persist_extension_handle); ++ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_get_operation_extension operation (op=0x%lx) -> %d\n", ++ (ulong) op, ident ? ident->idx_pl : -1); ++ return (const op_ext_ident_t *) ident; ++ ++} ++ ++void ++sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident) ++{ ++ Slapi_Operation *op; ++ ++ slapi_pblock_get(pb, SLAPI_OPERATION, &op); ++ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_set_operation_extension operation (op=0x%lx) -> %d\n", ++ (ulong) op, op_ident ? 
op_ident->idx_pl : -1); ++ slapi_set_object_extension(sync_persist_extension_type, op, ++ sync_persist_extension_handle, (void *)op_ident); ++} ++/* operation extension constructor */ ++static void * ++sync_persist_operation_extension_constructor(void *object __attribute__((unused)), void *parent __attribute__((unused))) ++{ ++ /* we only set the extension value explicitly in sync_update_persist_betxn_pre_op */ ++ return NULL; /* we don't set anything in the ctor */ ++} ++ ++/* consumer operation extension destructor */ ++static void ++sync_persist_operation_extension_destructor(void *ext, void *object __attribute__((unused)), void *parent __attribute__((unused))) ++{ ++ op_ext_ident_t *op_ident = (op_ext_ident_t *)ext; ++ slapi_ch_free((void **)&op_ident); ++} ++static int ++sync_persist_register_operation_extension(void) ++{ ++ return slapi_register_object_extension(SYNC_PLUGIN_SUBSYSTEM, ++ SLAPI_EXT_OPERATION, ++ sync_persist_operation_extension_constructor, ++ sync_persist_operation_extension_destructor, ++ &sync_persist_extension_type, ++ &sync_persist_extension_handle); + } +\ No newline at end of file +diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c +index d13f142b0..e93a8fa83 100644 +--- a/ldap/servers/plugins/sync/sync_persist.c ++++ b/ldap/servers/plugins/sync/sync_persist.c +@@ -47,6 +47,9 @@ static int sync_release_connection(Slapi_PBlock *pb, Slapi_Connection *conn, Sla + * per thread pending list of nested operation.. + * being a betxn_preop the pending list has the same order + * that the server received the operation ++ * ++ * In case of DB_RETRY, this callback can be called several times ++ * The detection of the DB_RETRY is done via the operation extension + */ + int + sync_update_persist_betxn_pre_op(Slapi_PBlock *pb) +@@ -54,64 +57,128 @@ sync_update_persist_betxn_pre_op(Slapi_PBlock *pb) + OPERATION_PL_CTX_T *prim_op; + OPERATION_PL_CTX_T *new_op; + Slapi_DN *sdn; ++ uint32_t idx_pl = 0; ++ op_ext_ident_t *op_ident; ++ Operation *op; + + if (!SYNC_IS_INITIALIZED()) { + /* not initialized if sync plugin is not started */ + return 0; + } + ++ prim_op = get_thread_primary_op(); ++ op_ident = sync_persist_get_operation_extension(pb); ++ slapi_pblock_get(pb, SLAPI_OPERATION, &op); ++ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn); ++ ++ /* Check if we are in a DB retry case */ ++ if (op_ident && prim_op) { ++ OPERATION_PL_CTX_T *current_op; ++ ++ /* This callback is called (with the same operation) because of a DB_RETRY */ ++ ++ /* It already existed (in the operation extension) an index of the operation in the pending list */ ++ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next) { ++ if (op_ident->idx_pl == idx_pl) { ++ break; ++ } ++ } ++ ++ /* The retrieved operation in the pending list is at the right ++ * index and state. 
Just return making this callback a noop ++ */ ++ PR_ASSERT(current_op); ++ PR_ASSERT(current_op->op == op); ++ PR_ASSERT(current_op->flags == OPERATION_PL_PENDING); ++ slapi_log_err(SLAPI_LOG_WARNING, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - DB retried operation targets " ++ "\"%s\" (op=0x%lx idx_pl=%d) => op not changed in PL\n", ++ slapi_sdn_get_dn(sdn), (ulong) op, idx_pl); ++ return 0; ++ } ++ + /* Create a new pending operation node */ + new_op = (OPERATION_PL_CTX_T *)slapi_ch_calloc(1, sizeof(OPERATION_PL_CTX_T)); + new_op->flags = OPERATION_PL_PENDING; +- slapi_pblock_get(pb, SLAPI_OPERATION, &new_op->op); +- slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn); ++ new_op->op = op; + +- prim_op = get_thread_primary_op(); + if (prim_op) { + /* It already exists a primary operation, so the current + * operation is a nested one that we need to register at the end + * of the pending nested operations ++ * Also computes the idx_pl that will be the identifier (index) of the operation ++ * in the pending list + */ + OPERATION_PL_CTX_T *current_op; +- for (current_op = prim_op; current_op->next; current_op = current_op->next); ++ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next); + current_op->next = new_op; ++ idx_pl++; /* idx_pl is currently the index of the last op ++ * as we are adding a new op we need to increase that index ++ */ + slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - nested operation targets " +- "\"%s\" (0x%lx)\n", +- slapi_sdn_get_dn(sdn), (ulong) new_op->op); ++ "\"%s\" (op=0x%lx idx_pl=%d)\n", ++ slapi_sdn_get_dn(sdn), (ulong) new_op->op, idx_pl); + } else { + /* The current operation is the first/primary one in the txn + * registers it directly in the thread private data (head) + */ + set_thread_primary_op(new_op); ++ idx_pl = 0; /* as primary operation, its index in the pending list is 0 */ + slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - primary operation targets " + "\"%s\" (0x%lx)\n", + slapi_sdn_get_dn(sdn), (ulong) new_op->op); + } ++ ++ /* records, in the operation extension AND in the pending list, the identifier (index) of ++ * this operation into the pending list ++ */ ++ op_ident = (op_ext_ident_t *) slapi_ch_calloc(1, sizeof (op_ext_ident_t)); ++ op_ident->idx_pl = idx_pl; ++ new_op->idx_pl = idx_pl; ++ sync_persist_set_operation_extension(pb, op_ident); + return 0; + } + +-/* This operation can not be proceed by sync_repl listener because +- * of internal problem. For example, POST entry does not exist ++/* This operation failed or skipped (e.g. no MODs). ++ * In such case POST entry does not exist + */ + static void +-ignore_op_pl(Operation *op) ++ignore_op_pl(Slapi_PBlock *pb) + { + OPERATION_PL_CTX_T *prim_op, *curr_op; ++ op_ext_ident_t *ident; ++ Operation *op; ++ ++ slapi_pblock_get(pb, SLAPI_OPERATION, &op); ++ ++ /* prim_op is set if betxn was called ++ * In case of invalid update (schema violation) the ++ * operation skip betxn and prim_op is not set. 
++ * This is the same for ident ++ */ + prim_op = get_thread_primary_op(); ++ ident = sync_persist_get_operation_extension(pb); + +- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { +- if ((curr_op->op == op) && +- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates +- * we can not only rely on 'op' value +- */ +- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (0x%lx) from the pending list\n", +- (ulong) op); +- curr_op->flags = OPERATION_PL_IGNORED; +- return; ++ if (ident) { ++ /* The TXN_BEPROP was called, so the operation is ++ * registered in the pending list ++ */ ++ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { ++ if (curr_op->idx_pl == ident->idx_pl) { ++ /* The operation extension (ident) refers this operation (currop in the pending list). ++ * This is called during sync_repl postop. At this moment ++ * the operation in the pending list (identified by idx_pl in the operation extension) ++ * should be pending ++ */ ++ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING); ++ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (op=0x%lx, idx_pl=%d) from the pending list\n", ++ (ulong) op, ident->idx_pl); ++ curr_op->flags = OPERATION_PL_IGNORED; ++ return; ++ } + } + } +- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl can not retrieve an operation (0x%lx) in pending list\n", +- (ulong) op); ++ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl failing operation (op=0x%lx, idx_pl=%d) was not in the pending list\n", ++ (ulong) op, ident ? ident->idx_pl : -1); + } + + /* This is a generic function that is called by betxn_post of this plugin. +@@ -126,7 +193,9 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber + { + OPERATION_PL_CTX_T *prim_op = NULL, *curr_op; + Operation *pb_op; ++ op_ext_ident_t *ident; + Slapi_DN *sdn; ++ uint32_t count; /* use for diagnostic of the lenght of the pending list */ + int32_t rc; + + if (!SYNC_IS_INITIALIZED()) { +@@ -138,7 +207,7 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber + + if (NULL == e) { + /* Ignore this operation (for example case of failure of the operation) */ +- ignore_op_pl(pb_op); ++ ignore_op_pl(pb); + return; + } + +@@ -161,16 +230,21 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber + + + prim_op = get_thread_primary_op(); ++ ident = sync_persist_get_operation_extension(pb); + PR_ASSERT(prim_op); ++ PR_ASSERT(ident); + /* First mark the operation as completed/failed + * the param to be used once the operation will be pushed + * on the listeners queue + */ + for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { +- if ((curr_op->op == pb_op) && +- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates +- * we can not only rely on 'op' value +- */ ++ if (curr_op->idx_pl == ident->idx_pl) { ++ /* The operation extension (ident) refers this operation (currop in the pending list) ++ * This is called during sync_repl postop. At this moment ++ * the operation in the pending list (identified by idx_pl in the operation extension) ++ * should be pending ++ */ ++ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING); + if (rc == LDAP_SUCCESS) { + curr_op->flags = OPERATION_PL_SUCCEEDED; + curr_op->entry = e ? 
slapi_entry_dup(e) : NULL; +@@ -183,46 +257,50 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber + } + } + if (!curr_op) { +- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation not found on the pendling list\n", label); ++ slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation (op=0x%lx, idx_pl=%d) not found on the pendling list\n", ++ label, (ulong) pb_op, ident->idx_pl); + PR_ASSERT(curr_op); + } + +-#if DEBUG +- /* dump the pending queue */ +- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { +- char *flags_str; +- char * entry_str; ++ /* for diagnostic of the pending list, dump its content if it is too long */ ++ for (count = 0, curr_op = prim_op; curr_op; count++, curr_op = curr_op->next); ++ if (loglevel_is_set(SLAPI_LOG_PLUGIN) && (count > 10)) { + +- if (curr_op->entry) { +- entry_str = slapi_entry_get_dn(curr_op->entry); +- } else if (curr_op->eprev){ +- entry_str = slapi_entry_get_dn(curr_op->eprev); +- } else { +- entry_str = "unknown"; +- } +- switch (curr_op->flags) { +- case OPERATION_PL_SUCCEEDED: +- flags_str = "succeeded"; +- break; +- case OPERATION_PL_FAILED: +- flags_str = "failed"; +- break; +- case OPERATION_PL_IGNORED: +- flags_str = "ignored"; +- break; +- case OPERATION_PL_PENDING: +- flags_str = "pending"; +- break; +- default: +- flags_str = "unknown"; +- break; +- ++ /* if pending list looks abnormally too long, dump the pending list */ ++ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { ++ char *flags_str; ++ char * entry_str; + +- } +- slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n", ++ if (curr_op->entry) { ++ entry_str = slapi_entry_get_dn(curr_op->entry); ++ } else if (curr_op->eprev) { ++ entry_str = slapi_entry_get_dn(curr_op->eprev); ++ } else { ++ entry_str = "unknown"; ++ } ++ switch (curr_op->flags) { ++ case OPERATION_PL_SUCCEEDED: ++ flags_str = "succeeded"; ++ break; ++ case OPERATION_PL_FAILED: ++ flags_str = "failed"; ++ break; ++ case OPERATION_PL_IGNORED: ++ flags_str = "ignored"; ++ break; ++ case OPERATION_PL_PENDING: ++ flags_str = "pending"; ++ break; ++ default: ++ flags_str = "unknown"; ++ break; ++ ++ ++ } ++ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n", + (ulong) curr_op->op, entry_str, flags_str); ++ } + } +-#endif + + /* Second check if it remains a pending operation in the pending list */ + for (curr_op = prim_op; curr_op; curr_op = curr_op->next) { +-- +2.26.2 + diff --git a/SOURCES/0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch b/SOURCES/0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch new file mode 100644 index 0000000..d80f386 --- /dev/null +++ b/SOURCES/0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch @@ -0,0 +1,145 @@ +From e6536aa27bfdc27cad07f6c5cd3312f0f0710c96 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Mon, 1 Feb 2021 09:28:25 +0100 +Subject: [PATCH 3/3] Issue 4581 - A failed re-indexing leaves the database in + broken state (#4582) + +Bug description: + During reindex the numsubordinates attribute is not updated in parent entries. + The consequence is that the internal counter job->numsubordinates==0. + Later when indexing the ancestorid, the server can show the progression of this + indexing with a ratio using job->numsubordinates==0. + Division with 0 -> SIGFPE + +Fix description: + if the numsubordinates is NULL, log a message without a division. 
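
To make the guard concrete, here is a minimal sketch in plain Python (illustrative only; the real change is in the C progress logging of bdb_import.c): the percentage is computed only when the subordinate count is non-zero, otherwise a bare counter is logged, so the ratio can never divide by zero.

    # Hedged sketch of the guard applied around the progress messages:
    # compute the percentage only when the total is known and non-zero.
    def log_progress(processed, total):
        if total:
            print(f"processed {processed * 100 // total}% (ID count {processed})")
        else:
            print(f"processed {processed} ancestors...")

    log_progress(500, 0)     # the case that previously divided by zero
    log_progress(500, 2000)  # normal case: 25%
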
+ +relates: https://github.com/389ds/389-ds-base/issues/4581 + +Reviewed by: Pierre Rogier, Mark Reynolds, Simon Pichugin, Teko Mihinto (thanks !!) + +Platforms tested: F31 +--- + .../slapd/back-ldbm/db-bdb/bdb_import.c | 72 ++++++++++++++----- + 1 file changed, 54 insertions(+), 18 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +index ba783ee59..7f484934f 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c +@@ -468,18 +468,30 @@ bdb_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job) + } + key_count++; + if (!(key_count % PROGRESS_INTERVAL)) { +- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", +- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", +- (key_count * 100 / job->numsubordinates), key_count); ++ if (job->numsubordinates) { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", ++ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", ++ (key_count * 100 / job->numsubordinates), key_count); ++ } else { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", ++ "Gathering ancestorid non-leaf IDs: processed %d ancestors...", ++ key_count); ++ } + started_progress_logging = 1; + } + } while (ret == 0 && !(job->flags & FLAG_ABORT)); + + if (started_progress_logging) { + /* finish what we started logging */ +- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", +- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", +- (key_count * 100 / job->numsubordinates), key_count); ++ if (job->numsubordinates) { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", ++ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)", ++ (key_count * 100 / job->numsubordinates), key_count); ++ } else { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", ++ "Gathering ancestorid non-leaf IDs: processed %d ancestors", ++ key_count); ++ } + } + import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", + "Finished gathering ancestorid non-leaf IDs."); +@@ -660,9 +672,15 @@ bdb_ancestorid_default_create_index(backend *be, ImportJob *job) + + key_count++; + if (!(key_count % PROGRESS_INTERVAL)) { +- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", +- "Creating ancestorid index: processed %d%% (ID count %d)", +- (key_count * 100 / job->numsubordinates), key_count); ++ if (job->numsubordinates) { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", ++ "Creating ancestorid index: processed %d%% (ID count %d)", ++ (key_count * 100 / job->numsubordinates), key_count); ++ } else { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", ++ "Creating ancestorid index: processed %d ancestors...", ++ key_count); ++ } + started_progress_logging = 1; + } + +@@ -743,9 +761,15 @@ out: + if (ret == 0) { + if (started_progress_logging) { + /* finish what we started logging */ +- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", +- "Creating ancestorid index: processed %d%% (ID count %d)", +- (key_count * 100 / job->numsubordinates), key_count); ++ if (job->numsubordinates) { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", ++ "Creating ancestorid index: processed %d%% (ID count %d)", ++ (key_count * 100 / job->numsubordinates), key_count); ++ } else { ++ import_log_notice(job, SLAPI_LOG_INFO, 
"bdb_ancestorid_default_create_index", ++ "Creating ancestorid index: processed %d ancestors", ++ key_count); ++ } + } + import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index", + "Created ancestorid index (old idl)."); +@@ -869,9 +893,15 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job) + + key_count++; + if (!(key_count % PROGRESS_INTERVAL)) { +- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", +- "Creating ancestorid index: progress %d%% (ID count %d)", +- (key_count * 100 / job->numsubordinates), key_count); ++ if (job->numsubordinates) { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", ++ "Creating ancestorid index: progress %d%% (ID count %d)", ++ (key_count * 100 / job->numsubordinates), key_count); ++ } else { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", ++ "Creating ancestorid index: progress %d ancestors...", ++ key_count); ++ } + started_progress_logging = 1; + } + +@@ -932,9 +962,15 @@ out: + if (ret == 0) { + if (started_progress_logging) { + /* finish what we started logging */ +- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", +- "Creating ancestorid index: processed %d%% (ID count %d)", +- (key_count * 100 / job->numsubordinates), key_count); ++ if (job->numsubordinates) { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", ++ "Creating ancestorid index: processed %d%% (ID count %d)", ++ (key_count * 100 / job->numsubordinates), key_count); ++ } else { ++ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", ++ "Creating ancestorid index: processed %d ancestors", ++ key_count); ++ } + } + import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index", + "Created ancestorid index (new idl)."); +-- +2.26.2 + diff --git a/SOURCES/0036-Issue-4513-CI-Tests-fix-test-failures.patch b/SOURCES/0036-Issue-4513-CI-Tests-fix-test-failures.patch new file mode 100644 index 0000000..69c362a --- /dev/null +++ b/SOURCES/0036-Issue-4513-CI-Tests-fix-test-failures.patch @@ -0,0 +1,190 @@ +From 4839898dbe69d6445f3571beec1bf3f1557d6cc6 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 12 Jan 2021 10:09:23 -0500 +Subject: [PATCH] Issue 4513 - CI Tests - fix test failures + +Description: + + Fixed tests in these suites: basic, entryuuid, filter, lib389, and schema + +relates: https://github.com/389ds/389-ds-base/issues/4513 + +Reviewed by: progier(Thanks!) 
+--- + dirsrvtests/tests/suites/basic/basic_test.py | 65 ++++++++++--------- + .../filter/rfc3673_all_oper_attrs_test.py | 4 +- + .../suites/lib389/config_compare_test.py | 5 +- + .../suites/lib389/idm/user_compare_i2_test.py | 3 + + .../tests/suites/schema/schema_reload_test.py | 3 + + 5 files changed, 47 insertions(+), 33 deletions(-) + +diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py +index 97908c31c..fc9af46e4 100644 +--- a/dirsrvtests/tests/suites/basic/basic_test.py ++++ b/dirsrvtests/tests/suites/basic/basic_test.py +@@ -1059,6 +1059,41 @@ def test_search_ou(topology_st): + assert len(entries) == 0 + + ++def test_bind_invalid_entry(topology_st): ++ """Test the failing bind does not return information about the entry ++ ++ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1: bind as non existing entry ++ 2: check that bind info does not report 'No such entry' ++ ++ :expectedresults: ++ 1: pass ++ 2: pass ++ """ ++ ++ topology_st.standalone.restart() ++ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX ++ try: ++ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) ++ except ldap.LDAPError as e: ++ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) ++ log.info('exception description: ' + e.args[0]['desc']) ++ if 'info' in e.args[0]: ++ log.info('exception info: ' + e.args[0]['info']) ++ assert e.args[0]['desc'] == 'Invalid credentials' ++ assert 'info' not in e.args[0] ++ pass ++ ++ log.info('test_bind_invalid_entry: PASSED') ++ ++ # reset credentials ++ topology_st.standalone.simple_bind_s(DN_DM, PW_DM) ++ ++ + @pytest.mark.bz1044135 + @pytest.mark.ds47319 + def test_connection_buffer_size(topology_st): +@@ -1477,36 +1512,6 @@ def test_dscreate_with_different_rdn(dscreate_test_rdn_value): + else: + assert True + +-def test_bind_invalid_entry(topology_st): +- """Test the failing bind does not return information about the entry +- +- :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f +- +- :setup: Standalone instance +- +- :steps: +- 1: bind as non existing entry +- 2: check that bind info does not report 'No such entry' +- +- :expectedresults: +- 1: pass +- 2: pass +- """ +- +- topology_st.standalone.restart() +- INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX +- try: +- topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) +- except ldap.LDAPError as e: +- log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) +- log.info('exception description: ' + e.args[0]['desc']) +- if 'info' in e.args[0]: +- log.info('exception info: ' + e.args[0]['info']) +- assert e.args[0]['desc'] == 'Invalid credentials' +- assert 'info' not in e.args[0] +- pass +- +- log.info('test_bind_invalid_entry: PASSED') + + + if __name__ == '__main__': +diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py +index c882bea5f..0477acda7 100644 +--- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py ++++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py +@@ -53,11 +53,11 @@ TEST_PARAMS = [(DN_ROOT, False, [ + (TEST_USER_DN, False, [ + 'createTimestamp', 'creatorsName', 'entrydn', + 'entryid', 'modifiersName', 'modifyTimestamp', +- 'nsUniqueId', 'parentid' ++ 'nsUniqueId', 'parentid', 'entryUUID' + ]), + (TEST_USER_DN, True, [ + 'createTimestamp', 'creatorsName', 'entrydn', +- 'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid' ++ 'entryid', 
'modifyTimestamp', 'nsUniqueId', 'parentid', 'entryUUID' + ]), + (DN_CONFIG, False, [ + 'numSubordinates', 'passwordHistory' +diff --git a/dirsrvtests/tests/suites/lib389/config_compare_test.py b/dirsrvtests/tests/suites/lib389/config_compare_test.py +index 709bae8cb..84f55acfa 100644 +--- a/dirsrvtests/tests/suites/lib389/config_compare_test.py ++++ b/dirsrvtests/tests/suites/lib389/config_compare_test.py +@@ -22,15 +22,18 @@ def test_config_compare(topology_i2): + st2_config = topology_i2.ins.get('standalone2').config + # 'nsslapd-port' attribute is expected to be same in cn=config comparison, + # but they are different in our testing environment +- # as we are using 2 DS instances running, both running simultaneuosly. ++ # as we are using 2 DS instances running, both running simultaneously. + # Hence explicitly adding 'nsslapd-port' to compare_exclude. + st1_config._compare_exclude.append('nsslapd-port') + st2_config._compare_exclude.append('nsslapd-port') + st1_config._compare_exclude.append('nsslapd-secureport') + st2_config._compare_exclude.append('nsslapd-secureport') ++ st1_config._compare_exclude.append('nsslapd-ldapssotoken-secret') ++ st2_config._compare_exclude.append('nsslapd-ldapssotoken-secret') + + assert Config.compare(st1_config, st2_config) + ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py +index c7540e4ce..ccde0f6b0 100644 +--- a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py ++++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py +@@ -39,6 +39,9 @@ def test_user_compare_i2(topology_i2): + st2_users.create(properties=user_properties) + st2_testuser = st2_users.get('testuser') + ++ st1_testuser._compare_exclude.append('entryuuid') ++ st2_testuser._compare_exclude.append('entryuuid') ++ + assert UserAccount.compare(st1_testuser, st2_testuser) + + +diff --git a/dirsrvtests/tests/suites/schema/schema_reload_test.py b/dirsrvtests/tests/suites/schema/schema_reload_test.py +index 2ece5dda5..e7e7d833d 100644 +--- a/dirsrvtests/tests/suites/schema/schema_reload_test.py ++++ b/dirsrvtests/tests/suites/schema/schema_reload_test.py +@@ -54,6 +54,7 @@ def test_valid_schema(topo): + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7.8 NAME 'TestObject' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "sn $ ValidAttribute ) X-ORIGIN 'user defined' )')\n") ++ os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) +@@ -106,6 +107,7 @@ def test_invalid_schema(topo): + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MoZiLLaOBJeCT' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "sn $ MozillaAttribute ) X-ORIGIN 'user defined' )')\n") ++ os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) +@@ -122,6 +124,7 @@ def test_invalid_schema(topo): + schema_file.write("objectclasses: ( 1.2.3.4.5.6.70 NAME 'MoZiLLaOBJeCT' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "cn $ MoZiLLaATTRiBuTe ) X-ORIGIN 'user defined' )')\n") ++ os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) +-- +2.26.2 + diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index 
7901e19..5d1d14b 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -44,8 +44,8 @@ ExcludeArch: i686 Summary: 389 Directory Server (base) Name: 389-ds-base -Version: 1.4.3.8 -Release: %{?relprefix}4%{?prerel}%{?dist} +Version: 1.4.3.16 +Release: %{?relprefix}11%{?prerel}%{?dist} License: GPLv3+ URL: https://www.port389.org Group: System Environment/Daemons @@ -174,28 +174,44 @@ Source2: %{name}-devel.README %if %{bundle_jemalloc} Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2 %endif -Patch01: 0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch -Patch02: 0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch -Patch03: 0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch -Patch04: 0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch -Patch05: 0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch -Patch06: 0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch -Patch07: 0007-Issue-51110-Fix-ASAN-ODR-warnings.patch -Patch08: 0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch -Patch09: 0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch -Patch10: 0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch -Patch11: 0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch -Patch12: 0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch -Patch13: 0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch -Patch14: 0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch -Patch15: 0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch -Patch16: 0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch -Patch17: 0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch -Patch18: 0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch -Patch19: 0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch -Patch20: 0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch -Patch21: 0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch -Patch22: 0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch +Patch01: 0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch +Patch02: 0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch +Patch03: 0003-do-not-add-referrals-for-masters-with-different-data.patch +Patch04: 0004-Ticket-50933-Update-2307compat.ldif.patch +Patch05: 0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch +Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch +Patch07: 0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch +Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch +Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch +Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch +Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch +Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch +Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch +Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch +Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch +Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch +Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch +Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch +Patch20: 
0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch +Patch21: 0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch +Patch22: 0022-Fix-cherry-pick-erorr.patch +Patch23: 0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch +Patch24: 0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch +Patch25: 0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch +Patch26: 0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch +Patch27: 0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +Patch28: 0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch +Patch29: 0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch +Patch30: 0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch +Patch31: 0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch +Patch32: 0032-Backport-tests-from-master-branch-fix-failing-tests-.patch +Patch33: 0033-Issue-5442-Search-results-are-different-between-RHDS.patch +Patch34: 0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch +Patch35: 0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch +Patch36: 0036-Issue-4513-CI-Tests-fix-test-failures.patch +# Patch37: 0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch + %description 389 Directory Server is an LDAPv3 compliant server. The base package includes @@ -813,6 +829,100 @@ exit 0 %doc README.md %changelog +* Mon Feb 15 2021 Mark Reynolds - 1.4.3.16-11 +- Bump version to 1.4.3.16-11 +- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation(remove patch as it breaks DogTag, will add this patch back after DogTag is fixed) + +* Wed Feb 10 2021 Mark Reynolds - 1.4.3.16-10 +- Bump version to 1.4.3.16-10 +- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation(part 2) + +* Tue Feb 2 2021 Mark Reynolds - 1.4.3.16-9 +- Bump version to 1.4.3.16-9 +- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation +- Resolves: Bug 1916677 - A failed re-indexing leaves the database in broken state. +- Resolves: Bug 1912822 - sync_repl: when completing an operation in the pending list, it can select the wrong operation + +* Wed Jan 13 2021 Mark Reynolds - 1.4.3.16-8 +- Bump version to 1.4.3.16-8 +- Resolves: Bug 1903539 - cn=monitor is throwing err=32 with scope: -s one +- Resolves: Bug 1893870 - PR_WaitCondVar() issue causes replication delay when clock jumps backwards + +* Thu Jan 7 2021 Mark Reynolds - 1.4.3.16-7 +- Bump version to 1.4.3.16-7 +- Resolves: Bug 1890118 - SIGFPE crash in rhds disk monitoring routine +- Resolves: Bug 1904991 - 389-ds:1.4/389-ds-base: information disclosure during the binding of a DN +- Resolves: Bug 1627645 - ldif2db does not change exit code when there are skipped entries + +* Wed Dec 16 2020 Mark Reynolds - 1.4.3.16-6 +- Bump version to 1.4.3.16-6 +- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0) +- Resolves: Bug 1904991 - Unexpected info returned to ldap request +- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix +- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname. 
+ +* Wed Dec 9 2020 Mark Reynolds - 1.4.3.16-5 +- Bump version to 1.4.3.16-5 +- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV +- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested +- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie get wrong changenumber +- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie + +* Thu Dec 3 2020 Mark Reynolds - 1.4.3.16-4 +- Bump version to 1.4.3.16-4 +- Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand +- Resolves: Bug 1801086 - [RFE] Generate dsrc file using dsconf +- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix + +* Wed Nov 25 2020 Mark Reynolds - 1.4.3.16-3 +- Bump version to 1.4.3.16-3 +- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema +- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection +- Resolves: Bug 1898850 - Entries conflict not resolved by replication + +* Thu Nov 19 2020 Mark Reynolds - 1.4.3.16-2 +- Bump version to 1.4.3.16-2 +- Resolves: Bug 1859227 - create keep alive entry after on line init +- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32 +- Resolves: Bug 1859228 - do not add referrals for masters with different data generation + +* Mon Oct 26 2020 Mark Reynolds - 1.4.3.16-1 +- Bump version to 1.4.3.16-1 +- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber +- Resolves: Bug 1859225 - suffix management in backends incorrect + +* Mon Oct 26 2020 Mark Reynolds - 1.4.3.14-1 +- Bump version to 1.4.3.14-1 +- Resolves: Bug 1862529 - Rebase 389-ds-base-1.4.3 in RHEL 8.4 +- Resolves: Bug 1859301 - Misleading message in access log for idle timeout +- Resolves: Bug 1889782 - Missing closing quote when reporting the details of unindexed/paged search results +- Resolves: Bug 1862971 - dsidm user status fails with Error: 'nsUserAccount' object has no attribute 'is_locked' +- Resolves: Bug 1859878 - Managed Entries configuration not being enforced +- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend +- Resolves: Bug 1851967 - if dbhome directory is set online backup fails +- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested +- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber +- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie +- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection +- Resolves: Bug 1872930 - dscreate: Not possible to bind to a unix domain socket +- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode +- Resolves: Bug 1859282 - remove ldbm_back_entry_release +- Resolves: Bug 1859225 - suffix management in backends incorrect +- Resolves: Bug 1859224 - remove unused or unnecessary database plugin functions +- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema +- Resolves: Bug 1851975 - Add option to reject internal unindexed searches +- Resolves: Bug 1851972 - Remove code duplication from the BDB backend separation work +- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time +- Resolves: Bug 1848359 - Add failover credentials to replication agreement +- 
Resolves: Bug 1837315 - Healthcheck code DSBLE0002 not returned on disabled suffix + +* Wed Aug 5 2020 Mark Reynolds - 1.4.3.8-5 +- Bump version to 1.4.3.8-5 +- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version +- Resolves: Bug 1800529 - Memory leaks in disk monitoring +- Resolves: Bug 1748227 - Instance name length is not enforced +- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package + * Fri Jun 26 2020 Mark Reynolds - 1.4.3.8-4 - Bump version to 1.4.3.8-4 - Resolves: Bug 1806978 - ns-slapd crashes during db2ldif @@ -865,195 +975,3 @@ exit 0 - Resolves: Bug 1790986 - cenotaph errors on modrdn operations - Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1 - Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init - -* Fri May 8 2020 Mark Reynolds - 1.4.3.8-0 -- Bump version to 1.4.3.8-0 -- Issue 51078 - Add nsslapd-enable-upgrade-hash to the schema -- Issue 51054 - Revise ACI target syntax checking -- Issue 51068 - deadlock when updating the schema -- Issue 51060 - unable to set sslVersionMin to TLS1.0 -- Issue 51064 - Unable to install server where IPv6 is disabled -- Issue 51051 - CLI fix consistency issues with confirmations -- Issue 49731 - undo db_home_dir under /dev/shm/dirsrv for now -- Issue 51054 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev -- Issue 51047 - React deprecating ComponentWillMount -- Issue 50499 - fix npm audit issues -- Issue 50545 - Port dbgen.pl to dsctl - -* Wed Apr 22 2020 Mark Reynolds - 1.4.3.7-1 -- Bump version to 1.4.3.7 -- Issue 51024 - syncrepl_entry callback does not contain attributes added by postoperation plugins -- Issue 51035 - Heavy StartTLS connection load can randomly fail with err=1 -- Issue 49731 - undo db_home_dir under /dev/shm/dirsrv for now -- Issue 51031 - UI - transition between two instances needs improvement - -* Thu Apr 16 2020 Mark Reynolds - 1.4.3.6-1 -- Bump version to 1.4.3.6 -- Issue 50933 - 10rfc2307compat.ldif is not ready to set used by default -- Issue 50931 - RFE AD filter rewriter for ObjectCategory -- Issue 51016 - Fix memory leaks in changelog5_init and perfctrs_init -- Issue 50980 - RFE extend usability for slapi_compute_add_search_rewriter and slapi_compute_add_evaluator -- Issue 51008 - dbhome in containers -- Issue 50875 - Refactor passwordUserAttributes's and passwordBadWords's code -- Issue 51014 - slapi_pal.c possible static buffer overflow -- Issue 50545 - remove dbmon "incr" option from arg parser -- Issue 50545 - Port dbmon.sh to dsconf -- Issue 51005 - AttributeUniqueness plugin's DN parameter should not have a default value -- Issue 49731 - Fix additional issues with setting db home directory by default -- Issue 50337 - Replace exec() with setattr() -- Issue 50905 - intermittent SSL hang with rhds -- Issue 50952 - SSCA lacks basicConstraint:CA -- Issue 50640 - Database links: get_monitor() takes 1 positional argument but 2 were given -- Issue 50869 - Setting nsslapd-allowed-sasl-mechanisms truncates the value - -* Wed Apr 1 2020 Mark Reynolds - 1.4.3.5-1 -- Bump version to 1.4.3.5 -- Issue 50994 - Fix latest UI bugs found by QE -- Issue 50933 - rfc2307compat.ldif -- Issue 50337 - Replace exec() with setattr() -- Issue 50984 - Memory leaks in disk monitoring -- Issue 50984 - Memory leaks in disk monitoring -- Issue 49731 - dscreate fails in silent mode because of db_home_dir -- Issue 50975 - Revise UI branding with new minimized build -- 
Issue 49437 - Fix memory leak with indirect COS -- Issue 49731 - Do not add db_home_dir to template-dse.ldif -- Issue 49731 - set and use db_home_directory by default -- Issue 50971 - fix BSD_SOURCE -- Issue 50744 - -n option of dbverify does not work -- Issue 50952 - SSCA lacks basicConstraint:CA -- Issue 50976 - Clean up Web UI source directory from unused files -- Issue 50955 - Fix memory leaks in chaining plugin(part 2) -- Issue 50966 - UI - Database indexes not using typeAhead correctly -- Issue 50974 - UI - wrong title in "Delete Suffix" popup -- Issue 50972 - Fix cockpit plugin build -- Issue 49761 - Fix CI test suite issues -- Issue 50971 - Support building on FreeBSD. -- Issue 50960 - [RFE] Advance options in RHDS Disk Monitoring Framework -- Issue 50800 - wildcards in rootdn-allow-ip attribute are not accepted -- Issue 50963 - We should bundle *.min.js files of Console -- Issue 50860 - Port Password Policy test cases from TET to python3 Password grace limit section. -- Issue 50860 - Port Password Policy test cases from TET to python3 series of bugs Port final -- Issue 50954 - buildnum.py - fix date formatting issue - -* Mon Mar 16 2020 Mark Reynolds - 1.4.3.4-1 -- Bump version to 1.4.3.4 -- Issue 50954 - Port buildnum.pl to python(part 2) -- Issue 50955 - Fix memory leaks in chaining plugin -- Issue 50954 - Port buildnum.pl to python -- Issue 50947 - change 00core.ldif objectClasses for openldap migration -- Issue 50755 - setting nsslapd-db-home-directory is overriding db_directory -- Issue 50937 - Update CLI for new backend split configuration -- Issue 50860 - Port Password Policy test cases from TET to python3 pwp.sh -- Issue 50945 - givenname alias of gn from openldap -- Issue 50935 - systemd override in lib389 for dscontainer -- Issue 50499 - Fix npm audit issues -- Issue 49761 - Fix CI test suite issues -- Issue 50618 - clean compiler warning and log level -- Issue 50889 - fix compiler issues -- Issue 50884 - Health check tool DSEldif check fails -- Issue 50926 - Remove dual spinner and other UI fixes -- Issue 50928 - Unable to create a suffix with countryName -- Issue 50758 - Only Recommend bash-completion, not Require -- Issue 50923 - Fix a test regression -- Issue 50904 - Connect All React Components And Refactor the Main Navigation Tab Code -- Issue 50920 - cl-dump exit code is 0 even if command fails with invalid arguments -- Issue 50923 - Add test - dsctl fails to remove instances with dashes in the name -- Issue 50919 - Backend delete fails using dsconf -- Issue 50872 - dsconf can't create GSSAPI replication agreements -- Issue 50912 - RFE - add password policy attribute pwdReset -- Issue 50914 - No error returned when adding an entry matching filters for a non existing automember group -- Issue 50889 - Extract pem files into a private namespace -- Issue 50909 - nsDS5ReplicaId cant be set to the old value it had before -- Issue 50686 - Port fractional replication test cases from TET to python3 final -- Issue 49845 - Remove pkgconfig check for libasan -- Issue:50860 - Port Password Policy test cases from TET to python3 bug624080 -- Issue:50860 - Port Password Policy test cases from TET to python3 series of bugs -- Issue 50786 - connection table freelist -- Issue 50618 - support cgroupv2 -- Issue 50900 - Fix cargo offline build -- Issue 50898 - ldclt core dumped when run with -e genldif option - -* Mon Feb 17 2020 Matus Honek - 1.4.3.3-3 -- Bring back the necessary c_rehash util (#1803370) - -* Fri Feb 14 2020 Mark Reynolds - 1.4.3.3-2 -- Bump version to 1.4.3.3-2 -- 
Remove unneeded perl dependencies -- Change bash-completion to "Recommends" instead of "Requires" - -* Thu Feb 13 2020 Mark Reynolds - 1.4.3.3-1 -- Bump version to 1.4.3.3 -- Issue 50855 - remove unused file from UI -- Issue 50855 - UI: Port Server Tab to React -- Issue 49845 - README does not contain complete information on building -- Issue 50686 - Port fractional replication test cases from TET to python3 part 1 -- Issue 49623 - cont cenotaph errors on modrdn operations -- Issue 50882 - Fix healthcheck errors for instances that do not have TLS enabled -- Issue 50886 - Typo in the replication debug message -- Issue 50873 - Fix healthcheck and virtual attr check -- Issue 50873 - Fix issues with healthcheck tool -- Issue 50028 - Add a new CI test case -- Issue 49946 - Add a new CI test case -- Issue 50117 - Add a new CI test case -- Issue 50787 - fix implementation of attr unique -- Issue 50859 - support running only with ldaps socket -- Issue 50823 - dsctl doesn't work with 'slapd-' in the instance name -- Issue 49624 - cont - DB Deadlock on modrdn appears to corrupt database and entry cache -- Issue 50867 - Fix minor buildsys issues -- Issue 50737 - Allow building with rust online without vendoring -- Issue 50831 - add cargo.lock to allow offline builds -- Issue 50694 - import PEM certs on startup -- Issue 50857 - Memory leak in ACI using IP subject -- Issue 49761 - Fix CI test suite issues -- Issue 50853 - Fix NULL pointer deref in config setting -- Issue 50850 - Fix dsctl healthcheck for python36 -- Issue 49990 - Need to enforce a hard maximum limit for file descriptors -- Issue 48707 - ldapssotoken for authentication - -* Tue Jan 28 2020 Fedora Release Engineering - 1.4.3.2-1.1 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild - -* Thu Jan 23 2020 Mark Reynolds - 1.4.3.2-1 -- Bump version to 1.4.3.2 -- Issue 49254 - Fix compiler failures and warnings -- Issue 50741 - cont bdb_start - Detected Disorderly Shutdown -- Issue 50836 - Port Schema UI tab to React -- Issue 50842 - Decrease 389-console Cockpit component size -- Issue 50790 - Add result text when filter is invalid -- Issue 50627 - Add ASAN logs to HTML report -- Issue 50834 - Incorrectly setting the NSS default SSL version max -- Issue 50829 - Disk monitoring rotated log cleanup causes heap-use-after-free -- Issue 50709 - (cont) Several memory leaks reported by Valgrind for 389-ds 1.3.9.1-10 -- Issue 50784 - performance testing scripts -- Issue 50599 - Fix memory leak when removing db region files -- Issue 49395 - Set the default TLS version min to TLS1.2 -- Issue 50818 - dsconf pwdpolicy get error -- Issue 50824 - dsctl remove fails with "name 'ensure_str' is not defined" -- Issue 50599 - Remove db region files prior to db recovery -- Issue 50812 - dscontainer executable should be placed under /usr/libexec/dirsrv/ -- Issue 50816 - dsconf allows the root password to be set to nothing -- Issue 50798 - incorrect bytes in format string(fix import issue) - -* Thu Jan 16 2020 Adam Williamson - 1.4.3.1-3 -- Backport two more import/missing function fixes - -* Wed Jan 15 2020 Adam Williamson - 1.4.3.1-2 -- Backport 828aad0 to fix missing imports from 1.4.3.1 - -* Mon Jan 13 2020 Mark Reynolds - 1.4.3.1-1 -- Bump version to 1.4.3.1 -- Issue 50798 - incorrect bytes in format string -- Issue 50545 - Add the new replication monitor functionality to UI -- Issue 50806 - Fix minor issues in lib389 health checks -- Issue 50690 - Port Password Storage test cases from TET to python3 part 1 -- Issue 49761 - Fix CI test suite 
issues -- Issue 49761 - Fix CI test suite issues -- Issue 50754 - Add Restore Change Log option to CLI -- Issue 48055 - CI test - automember_plugin(part2) -- Issue 50667 - dsctl -l did not respect PREFIX -- Issue 50780 - More CLI fixes -- Issue 50649 - lib389 without defaults.inf -- Issue 50780 - Fix UI issues -- Issue 50727 - correct mistaken options in filter validation patch -- Issue 50779 - lib389 - conflict compare fails for DN's with spaces -- Set branch version to 1.4.3.0