From 5d2be4455891c4f9b73f5ef6327bd555f3b7a8cd Mon Sep 17 00:00:00 2001
From: CentOS Sources
Date: Dec 15 2020 16:06:49 +0000
Subject: import 389-ds-base-1.4.3.8-6.module+el8.3.0+8995+c08169ba

---

diff --git a/SOURCES/0026-Issue-4297-On-ADD-replication-URP-issue-internal-sea.patch b/SOURCES/0026-Issue-4297-On-ADD-replication-URP-issue-internal-sea.patch
new file mode 100644
index 0000000..a09cca2
--- /dev/null
+++ b/SOURCES/0026-Issue-4297-On-ADD-replication-URP-issue-internal-sea.patch
@@ -0,0 +1,194 @@
+From e78d3bd879b880d679b49f3fa5ebe8009d309063 Mon Sep 17 00:00:00 2001
+From: tbordaz
+Date: Fri, 2 Oct 2020 12:03:12 +0200
+Subject: [PATCH 1/8] Issue 4297 - On ADD replication URP issue internal
+ searches with filter containing unescaped chars (#4355)
+
+Bug description:
+    In MMR a consumer receiving an ADD has to do some checking based on the basedn.
+    It checks if the entry was a tombstone or if the conflicting parent entry was a tombstone.
+
+    To do this checking, URP does internal searches using the basedn.
+    A '*' (ASTERISK) is valid in an RDN and in a DN. But when a DN is used as an
+    assertion value in a filter, the ASTERISK needs to be escaped, otherwise the
+    server interprets the filter type as a substring (see
+    https://tools.ietf.org/html/rfc4515#section-3).
+
+    The problem is that if an added entry contains an ASTERISK in its DN, it is not
+    escaped in the internal search and triggers a substring search (likely unindexed).
+
+Fix description:
+    Escape the DN before doing the internal searches in URP.
+
+Fixes: #4297
+
+Reviewed by: Mark Reynolds, William Brown, Simon Pichugi (thanks!)
+
+Platforms tested: F31
+---
+ .../suites/replication/acceptance_test.py | 63 +++++++++++++++++++
+ ldap/servers/plugins/replication/urp.c | 10 ++-
+ ldap/servers/slapd/filter.c | 21 +++++++
+ ldap/servers/slapd/slapi-plugin.h | 1 +
+ 4 files changed, 93 insertions(+), 2 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
+index 5009f4e7c..661dddb11 100644
+--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
++++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
+@@ -7,6 +7,7 @@
+ # --- END COPYRIGHT BLOCK ---
+ #
+ import pytest
++import logging
+ from lib389.replica import Replicas
+ from lib389.tasks import *
+ from lib389.utils import *
+@@ -556,6 +557,68 @@ def test_csnpurge_large_valueset(topo_m2):
+     for i in range(21,25):
+         test_user.add('description', 'value {}'.format(str(i)))
+
++@pytest.mark.ds51244
++def test_urp_trigger_substring_search(topo_m2):
++    """Test that an ADD of an entry with a '*' in its DN triggers
++    an internal search with an escaped DN
++
++    :id: 9869bb39-419f-42c3-a44b-c93eb0b77667
++    :setup: MMR with 2 masters
++    :steps:
++        1. Enable internal operation logging for plugins
++        2. Create on M1 a test_user with a '*' in its DN
++        3. Check the test_user is replicated
++        4. Check in access logs that the internal search does not contain '*'
++    :expectedresults:
++        1. Should succeed
++        2. Should succeed
++        3. Should succeed
++        4. Should succeed
++    """
++    m1 = topo_m2.ms["master1"]
++    m2 = topo_m2.ms["master2"]
++
++    # Enable internal operation logging to capture the URP internal operations
++    log.info('Set nsslapd-plugin-logging to on')
++    for inst in (m1, m2):
++        inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
++        inst.config.set('nsslapd-plugin-logging', 'on')
++        inst.restart()
++
++    # add a user with a DN containing '*'
++    test_asterisk_uid = 'asterisk_*_in_value'
++    test_asterisk_dn = 'uid={},{}'.format(test_asterisk_uid, DEFAULT_SUFFIX)
++
++    test_user = UserAccount(m1, test_asterisk_dn)
++    if test_user.exists():
++        log.info('Deleting entry {}'.format(test_asterisk_dn))
++        test_user.delete()
++    test_user.create(properties={
++        'uid': test_asterisk_uid,
++        'cn': test_asterisk_uid,
++        'sn': test_asterisk_uid,
++        'userPassword': test_asterisk_uid,
++        'uidNumber' : '1000',
++        'gidNumber' : '2000',
++        'homeDirectory' : '/home/asterisk',
++    })
++
++    # check that the ADD was replicated on M2
++    test_user_m2 = UserAccount(m2, test_asterisk_dn)
++    for i in range(1,5):
++        if test_user_m2.exists():
++            break
++        else:
++            log.info('Entry not yet replicated on M2, wait a bit')
++            time.sleep(2)
++
++    # check that M2 access logs do not contain "(&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_*_in_value,dc=example,dc=com))"
++    log.info('Check that on M2, URP has not triggered such an internal search')
++    pattern = ".*\(Internal\).*SRCH.*\(&\(objectclass=nstombstone\)\(nscpentrydn=uid=asterisk_\*_in_value,dc=example,dc=com.*"
++    found = m2.ds_access_log.match(pattern)
++    log.info("found line: %s" % found)
++    assert not found
++
+
+ if __name__ == '__main__':
+     # Run isolated
+diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
+index 79a817c90..301e9fa00 100644
+--- a/ldap/servers/plugins/replication/urp.c
++++ b/ldap/servers/plugins/replication/urp.c
+@@ -1411,9 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
+     Slapi_Entry **entries = NULL;
+     Slapi_PBlock *newpb;
+     char *basedn = slapi_entry_get_ndn(entry);
++    char *escaped_basedn;
+     const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
++    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+
+-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
++    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
++    slapi_ch_free((void **)&escaped_basedn);
+     newpb = slapi_pblock_new();
+     slapi_search_internal_set_pb(newpb,
+                                  slapi_sdn_get_dn(suffix), /* Base DN */
+@@ -1602,12 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
+     Slapi_Entry **entries = NULL;
+     Slapi_PBlock *newpb;
+     const char *basedn = slapi_sdn_get_dn(parentdn);
++    char *escaped_basedn;
++    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+
+     char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
+     CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
+     CSN *tombstone_csn = NULL;
+
+-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
++    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
++    slapi_ch_free((void **)&escaped_basedn);
+     newpb = slapi_pblock_new();
+     char *parent_dn = slapi_dn_parent (basedn);
+     slapi_search_internal_set_pb(newpb,
+diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
+index c818baec3..d671c87ff 100644
+--- a/ldap/servers/slapd/filter.c
++++ b/ldap/servers/slapd/filter.c
+@@ -130,6 +130,27 @@ filter_escape_filter_value(struct slapi_filter *f, const char *fmt, size_t len _
+     return ptr;
+ }
+
++/* Escape an equality filter value (assertionValue) for a given attribute.
++ * The caller must free the allocated escaped filter value.
++ */
++char *
++slapi_filter_escape_filter_value(char* filter_attr, char *filter_value)
++{
++    char *result;
++    struct slapi_filter *f;
++
++    if ((filter_attr == NULL) || (filter_value == NULL)) {
++        return NULL;
++    }
++    f = (struct slapi_filter *)slapi_ch_calloc(1, sizeof(struct slapi_filter));
++    f->f_choice = LDAP_FILTER_EQUALITY;
++    f->f_un.f_un_ava.ava_type = filter_attr;
++    f->f_un.f_un_ava.ava_value.bv_len = strlen(filter_value);
++    f->f_un.f_un_ava.ava_value.bv_val = filter_value;
++    result = filter_escape_filter_value(f, FILTER_EQ_FMT, FILTER_EQ_LEN);
++    slapi_ch_free((void**) &f);
++    return result;
++}
+
+ /*
+  * get_filter_internal(): extract an LDAP filter from a BerElement and create
+diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
+index 8d9c3fa6a..04c02cf7c 100644
+--- a/ldap/servers/slapd/slapi-plugin.h
++++ b/ldap/servers/slapd/slapi-plugin.h
+@@ -5262,6 +5262,7 @@ int slapi_vattr_filter_test_ext(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Filter *
+ int slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2);
+ Slapi_Filter *slapi_filter_dup(Slapi_Filter *f);
+ int slapi_filter_changetype(Slapi_Filter *f, const char *newtype);
++char *slapi_filter_escape_filter_value(char* filter_attr, char *filter_value);
+
+ int slapi_attr_is_last_mod(char *attr);
+
+--
+2.26.2
+
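The escaping behavior that patch 0026 relies on is easy to demonstrate outside the server. A minimal Python sketch using python-ldap's ldap.filter.escape_filter_chars (the DN value is illustrative):

    from ldap.filter import escape_filter_chars

    basedn = 'uid=asterisk_*_in_value,dc=example,dc=com'

    # Unescaped, the '*' turns the equality filter into a substring filter
    unsafe = '(&(objectclass=nstombstone)(nscpentrydn=%s))' % basedn

    # Escaped, '*' becomes '\2a' and the filter stays an equality match
    safe = '(&(objectclass=nstombstone)(nscpentrydn=%s))' % escape_filter_chars(basedn)

    print(unsafe)  # ...(nscpentrydn=uid=asterisk_*_in_value,...)
    print(safe)    # ...(nscpentrydn=uid=asterisk_\2a_in_value,...)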
diff --git a/SOURCES/0027-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch b/SOURCES/0027-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
new file mode 100644
index 0000000..d7373cb
--- /dev/null
+++ b/SOURCES/0027-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
@@ -0,0 +1,66 @@
+From 3cf7734177c70c36062d4e667b91e15f22a2ea81 Mon Sep 17 00:00:00 2001
+From: tbordaz
+Date: Wed, 25 Nov 2020 18:07:34 +0100
+Subject: [PATCH 2/8] Issue 4297 - 2nd fix for on ADD replication URP issue
+ internal searches with filter containing unescaped chars (#4439)
+
+Bug description:
+    The previous fix is buggy because slapi_filter_escape_filter_value returns
+    an escaped filter component, not an escaped assertion value.
+
+Fix description:
+    Use the escaped filter component.
+
+relates: https://github.com/389ds/389-ds-base/issues/4297
+
+Reviewed by: William Brown
+
+Platforms tested: F31
+---
+ ldap/servers/plugins/replication/urp.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
+index 301e9fa00..96ad2759a 100644
+--- a/ldap/servers/plugins/replication/urp.c
++++ b/ldap/servers/plugins/replication/urp.c
+@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
+     Slapi_Entry **entries = NULL;
+     Slapi_PBlock *newpb;
+     char *basedn = slapi_entry_get_ndn(entry);
+-    char *escaped_basedn;
++    char *escaped_filter;
+     const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
+-    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
++    escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+
+-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+-    slapi_ch_free((void **)&escaped_basedn);
++    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
++    slapi_ch_free((void **)&escaped_filter);
+     newpb = slapi_pblock_new();
+     slapi_search_internal_set_pb(newpb,
+                                  slapi_sdn_get_dn(suffix), /* Base DN */
+@@ -1605,15 +1605,14 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
+     Slapi_Entry **entries = NULL;
+     Slapi_PBlock *newpb;
+     const char *basedn = slapi_sdn_get_dn(parentdn);
+-    char *escaped_basedn;
+-    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+-
++    char *escaped_filter;
++    escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
+     char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
+     CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
+     CSN *tombstone_csn = NULL;
+
+-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+-    slapi_ch_free((void **)&escaped_basedn);
++    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
++    slapi_ch_free((void **)&escaped_filter);
+     newpb = slapi_pblock_new();
+     char *parent_dn = slapi_dn_parent (basedn);
+     slapi_search_internal_set_pb(newpb,
+--
+2.26.2
+
diff --git a/SOURCES/0028-Issue-51233-ds-replcheck-crashes-in-offline-mode.patch b/SOURCES/0028-Issue-51233-ds-replcheck-crashes-in-offline-mode.patch
new file mode 100644
index 0000000..ef55254
--- /dev/null
+++ b/SOURCES/0028-Issue-51233-ds-replcheck-crashes-in-offline-mode.patch
@@ -0,0 +1,37 @@
+From 16a004faf7eda3f8c4d59171bceab8cf78a9d002 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds
+Date: Thu, 6 Aug 2020 14:50:19 -0400
+Subject: [PATCH 3/8] Issue 51233 - ds-replcheck crashes in offline mode
+
+Bug Description:  When processing all the DNs found in the master LDIF,
+                  it is possible that the LDIF is not in the expected
+                  order and ldifsearch fails (crashing the tool).
+
+Fix Description:  If ldifsearch does not find an entry, start from the
+                  beginning of the LDIF and try again.
+
+relates: https://pagure.io/389-ds-base/issue/51233
+
+Reviewed by: spichugi (Thanks!)
+---
+ ldap/admin/src/scripts/ds-replcheck | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
+index 5bb7dfce3..1c133f4dd 100755
+--- a/ldap/admin/src/scripts/ds-replcheck
++++ b/ldap/admin/src/scripts/ds-replcheck
+@@ -725,6 +725,10 @@ def do_offline_report(opts, output_file=None):
+     missing = False
+     for dn in master_dns:
+         mresult = ldif_search(MLDIF, dn)
++        if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
++            # Try again from the beginning of the LDIF
++            MLDIF.seek(0)
++            mresult = ldif_search(MLDIF, dn)
+         rresult = ldif_search(RLDIF, dn)
+
+         if dn in replica_dns:
+--
+2.26.2
+
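The fix in patch 0028 amounts to a simple rewind-and-retry pattern: scan forward from the current file offset, and on a miss seek back to the start and scan once more. A minimal self-contained sketch of that pattern (the ldif_search below is a simplified stand-in for the script's real helper, which returns a richer result dict):

    def ldif_search(fh, dn):
        # Stand-in helper: scan forward for a matching "dn:" line
        for line in fh:
            if line.strip() == 'dn: ' + dn:
                return {'entry': dn, 'conflict': None, 'tombstone': False}
        return {'entry': None, 'conflict': None, 'tombstone': False}

    def find_entry(fh, dn):
        result = ldif_search(fh, dn)  # scan from the current offset
        if result['entry'] is None and result['conflict'] is None and not result['tombstone']:
            fh.seek(0)                # the entry may sort earlier in the file
            result = ldif_search(fh, dn)
        return result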
diff --git a/SOURCES/0029-Issue-4429-NULL-dereference-in-revert_cache.patch b/SOURCES/0029-Issue-4429-NULL-dereference-in-revert_cache.patch
new file mode 100644
index 0000000..d6f2dc6
--- /dev/null
+++ b/SOURCES/0029-Issue-4429-NULL-dereference-in-revert_cache.patch
@@ -0,0 +1,103 @@
+From bc8bdaa57ba9b57671e2921705b99eaa70729ce7 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds
+Date: Wed, 11 Nov 2020 11:45:11 -0500
+Subject: [PATCH 4/8] Issue 4429 - NULL dereference in revert_cache()
+
+Bug Description:   During a delete, if the DN (with an escaped leading space)
+                   of an existing entry fails to parse, the server will revert
+                   the entry update. In this case it leads to a crash
+                   because the ldbm instance struct is not set before it
+                   attempts the cache revert.
+
+Fix Description:   Check that the ldbm instance struct is not NULL before
+                   dereferencing it.
+
+Relates: https://github.com/389ds/389-ds-base/issues/4429
+
+Reviewed by: firstyear & spichugi (Thanks!!)
+---
+ .../tests/suites/syntax/acceptance_test.py | 40 +++++++++++++++++++
+ ldap/servers/slapd/back-ldbm/cache.c | 3 ++
+ 2 files changed, 43 insertions(+)
+
+diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
+index db8f63c7e..543718689 100644
+--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
++++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
+@@ -6,12 +6,14 @@
+ # See LICENSE for details.
+ # --- END COPYRIGHT BLOCK ---
+
++import ldap
+ import logging
+ import pytest
+ import os
+ from lib389.schema import Schema
+ from lib389.config import Config
+ from lib389.idm.user import UserAccounts
++from lib389.idm.group import Groups
+ from lib389._constants import DEFAULT_SUFFIX
+ from lib389.topologies import log, topology_st as topo
+
+@@ -105,6 +107,44 @@ def test_invalid_uidnumber(topo, validate_syntax_off):
+     log.info('Found an invalid entry with wrong uidNumber - Success')
+
+
++def test_invalid_dn_syntax_crash(topo):
++    """Add an entry with an escaped space, restart the server, and try to delete
++    it. In this case the DN is not correctly parsed, which causes the cache revert
++    to dereference a NULL pointer. The delete may fail, as long as the server
++    does not crash.
++
++    :id: 62d87272-dfb8-4627-9ca1-dbe33082caf8
++    :setup: Standalone Instance
++    :steps:
++        1. Add entry with leading escaped space in the RDN
++        2. Restart the server so the entry is rebuilt from the database
++        3. Delete the entry
++        4. The server should still be running
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++        4. Success
++    """
++
++    # Create group
++    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
++    group = groups.create(properties={'cn': ' test'})
++
++    # Restart the server
++    topo.standalone.restart()
++
++    # Delete group
++    try:
++        group.delete()
++    except ldap.NO_SUCH_OBJECT:
++        # This is okay in this case as we are only concerned about a crash
++        pass
++
++    # Make sure server is still running
++    groups.list()
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c
+index 89f958a35..5ad9ca829 100644
+--- a/ldap/servers/slapd/back-ldbm/cache.c
++++ b/ldap/servers/slapd/back-ldbm/cache.c
+@@ -614,6 +614,9 @@ flush_hash(struct cache *cache, struct timespec *start_time, int32_t type)
+ void
+ revert_cache(ldbm_instance *inst, struct timespec *start_time)
+ {
++    if (inst == NULL) {
++        return;
++    }
+     flush_hash(&inst->inst_cache, start_time, ENTRY_CACHE);
+     flush_hash(&inst->inst_dncache, start_time, DN_CACHE);
+ }
+--
+2.26.2
+
diff --git a/SOURCES/0030-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch b/SOURCES/0030-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
new file mode 100644
index 0000000..15c8818
--- /dev/null
+++ b/SOURCES/0030-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
@@ -0,0 +1,232 @@
+From 132f126c18214345ef4204bf8a061a0eca58fa59 Mon Sep 17 00:00:00 2001
+From: progier389 <72748589+progier389@users.noreply.github.com>
+Date: Tue, 3 Nov 2020 12:18:50 +0100
+Subject: [PATCH 5/8] ticket 2058: Add keep alive entry after on-line
+ initialization - second version (#4399)
+
+Bug description:
+The keep alive entry is not created on the target master after on-line initialization,
+and its RUV element stays empty until a direct update is issued on that master.
+
+Fix description:
+The patch allows a consumer (configured as a master) to create (if it did not
+exist before) the consumer's keep alive entry. It creates it at the end of a
+replication session, at a time when we are sure the changelog exists and will not
+be reset. It allows a consumer to have an RUV element with a csn in the RUV at the
+first incoming replication session.
+
+That is basically lkrispen's proposal with an associated pytest testcase.
+
+Second version changes:
+   - moved the testcase to suites/replication/regression_test.py
+   - set up the topology from a 2 master topology then
+     reinitialized the replicas from an ldif without replication metadata
+     rather than using the cli
+   - search for keepalive entries using search_s instead of getEntry
+   - add a comment about the keep alive entries' purpose
+
+last commit:
+   - wait until the RUVs are in sync before checking the keep alive entries
+
+Reviewed by: droideck, Firstyear
+
+Platforms tested: F32
+
+relates: #2058
+---
+ .../suites/replication/regression_test.py | 130 ++++++++++++++++++
+ .../plugins/replication/repl5_replica.c | 14 ++
+ ldap/servers/plugins/replication/repl_extop.c | 4 +
+ 3 files changed, 148 insertions(+)
+
+diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
+index 844d762b9..14b9d6a44 100644
+--- a/dirsrvtests/tests/suites/replication/regression_test.py
++++ b/dirsrvtests/tests/suites/replication/regression_test.py
+@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
+         for dn, entry in ldif_list:
+             ldif_writer.unparse(dn, entry)
+
++def _remove_replication_data(ldif_file):
++    """ Remove the replication data from an ldif file:
++        db2ldif without -r still includes some of the replica data like
++         - nsUniqueId
++         - keepalive entries
++        This function filters the ldif file to remove this data
++    """
++
++    with open(ldif_file) as f:
++        parser = ldif.LDIFRecordList(f)
++        parser.parse()
++
++        ldif_list = parser.all_records
++        # Iterate on a copy of the ldif entry list
++        for dn, entry in ldif_list[:]:
++            if dn.startswith('cn=repl keep alive'):
++                ldif_list.remove((dn,entry))
++            else:
++                entry.pop('nsUniqueId')
++    with open(ldif_file, 'w') as f:
++        ldif_writer = ldif.LDIFWriter(f)
++        for dn, entry in ldif_list:
++            ldif_writer.unparse(dn, entry)
++
+
+ @pytest.fixture(scope="module")
+ def topo_with_sigkill(request):
+@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
+     assert len(m1entries) == len(m2entries)
+
+
++def get_keepalive_entries(instance, replica):
++    # Returns the keepalive entries that exist under the suffix of the server instance
++    try:
++        entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
++                                    "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
++                                    ['cn', 'nsUniqueId', 'modifierTimestamp'])
++    except ldap.LDAPError as e:
++        log.fatal('Failed to retrieve keepalive entries on instance %s: error %s' % (instance, str(e)))
++        assert False
++    # No error, so lets log the keepalive entries
++    if log.isEnabledFor(logging.DEBUG):
++        for ret in entries:
++            log.debug("Found keepalive entry:\n" + str(ret))
++    return entries
++
++def verify_keepalive_entries(topo, expected):
++    # Check that keepalive entries exist (or do not exist) for every master on every master.
++    # Note: The testing method is quite basic: counting that there is one keepalive entry per master.
++    #       That is ok for simple test cases like test_online_init_should_create_keepalive_entries, but
++    #       not for the general case, as keepalive entries associated with masters that no longer exist
++    #       may remain (for example after: db2ldif / demote a master / ldif2db / init other masters)
++    #       ==> if the function is somehow pushed into lib389, a check better than simply counting
++    #           the entries should be done.
++    for masterId in topo.ms:
++        master = topo.ms[masterId]
++        for replica in Replicas(master).list():
++            if (replica.get_role() != ReplicaRole.MASTER):
++                continue
++            replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
++            log.debug(f'Checking keepAliveEntries on {replica_info}')
++            keepaliveEntries = get_keepalive_entries(master, replica)
++            expectedCount = len(topo.ms) if expected else 0
++            foundCount = len(keepaliveEntries)
++            if (foundCount == expectedCount):
++                log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
++            else:
++                log.error(f'{foundCount} keepalive entries were found '
++                          f'while {expectedCount} were expected on {replica_info}.')
++                assert False
++
++
++def test_online_init_should_create_keepalive_entries(topo_m2):
++    """Check that keep alive entries are created when initializing a master from another one
++
++    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
++    :setup: Two masters replication setup
++    :steps:
++        1. Generate ldif without replication data
++        2. Init both masters from that ldif
++        3. Check that keep alive entries do not exist
++        4. Perform on-line init of master2 from master1
++        5. Check that keep alive entries exist
++    :expectedresults:
++        1. No error while generating ldif
++        2. No error while importing the ldif file
++        3. No keepalive entry should exist on any master
++        4. No error while initializing master2
++        5. All keepalive entries should exist on every master
++
++    """
++
++    repl = ReplicationManager(DEFAULT_SUFFIX)
++    m1 = topo_m2.ms["master1"]
++    m2 = topo_m2.ms["master2"]
++    # Step 1: Generate ldif without replication data
++    m1.stop()
++    m2.stop()
++    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
++    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
++               excludeSuffixes=None, repl_data=False,
++               outputfile=ldif_file, encrypt=False)
++    # Remove replication metadata that is still in the ldif
++    _remove_replication_data(ldif_file)
++
++    # Step 2: Init both masters from that ldif
++    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
++    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
++    m1.start()
++    m2.start()
++
++    """ Replica state is now as if CLI setup has been done using:
++        dsconf master1 replication enable --suffix "${SUFFIX}" --role master
++        dsconf master2 replication enable --suffix "${SUFFIX}" --role master
++        dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
++        dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
++        dsconf master1 repl-agmt create --suffix "${SUFFIX}"
++        dsconf master2 repl-agmt create --suffix "${SUFFIX}"
++    """
++
++    # Step 3: No keepalive entry should exist on any master
++    verify_keepalive_entries(topo_m2, False)
++
++    # Step 4: Perform on-line init of master2 from master1
++    agmt = Agreements(m1).list()[0]
++    agmt.begin_reinit()
++    (done, error) = agmt.wait_reinit()
++    assert done is True
++    assert error is False
++
++    # Step 5: All keepalive entries should exist on every master
++    # Verify the keep alive entries once replication is in sync
++    # (that is the step that fails when the bug is not fixed)
++    repl.wait_for_ruv(m2, m1)
++    verify_keepalive_entries(topo_m2, True)
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
+index f01782330..f0ea0f8ef 100644
+--- a/ldap/servers/plugins/replication/repl5_replica.c
++++ b/ldap/servers/plugins/replication/repl5_replica.c
+@@ -373,6 +373,20 @@ replica_destroy(void **arg)
+     slapi_ch_free((void **)arg);
+ }
+
++/******************************************************************************
++ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
++ ******************************************************************************
++ * They are subentries of the replicated suffix and there is one per master. *
++ * These entries exist only to trigger a change that gets replicated over    *
++ * the topology.                                                             *
++ * Their main purpose is to generate records in the changelog, and they are  *
++ * updated from time to time by fractional replication to ensure that at     *
++ * least one change is replicated by FR after a large number of              *
++ * non-replicated changes accumulate in the changelog. The benefit is that   *
++ * the fractional RUV then gets updated, so fewer changes need to be walked  *
++ * in the changelog when searching for the first change to send.             *
++ ******************************************************************************/
++
+ #define KEEP_ALIVE_ATTR "keepalivetimestamp"
+ #define KEEP_ALIVE_ENTRY "repl keep alive"
+ #define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
+diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
+index 14c8e0bcc..af486f730 100644
+--- a/ldap/servers/plugins/replication/repl_extop.c
++++ b/ldap/servers/plugins/replication/repl_extop.c
+@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
+      */
+     if (cl5GetState() == CL5_STATE_OPEN) {
+         replica_log_ruv_elements(r);
++        /* now that the changelog is open and started, we can also create the
++         * keep alive entry without risk that the db and changelog will not match
++         */
++        replica_subentry_check(replica_get_root(r), replica_get_rid(r));
+     }
+
+     /* ONREPL code that dealt with new RUV, etc was moved into the code
+--
+2.26.2
+
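The keep alive entries created by patch 0030 follow the KEEP_ALIVE_DN_FORMAT shown above ("cn=%s %d,%s": the entry name, the replica id, and the suffix). A hypothetical sketch of the resulting DN for replica id 1 under dc=example,dc=com, and how such an entry could be looked up:

    keep_alive_dn = 'cn=%s %d,%s' % ('repl keep alive', 1, 'dc=example,dc=com')
    # -> 'cn=repl keep alive 1,dc=example,dc=com'
    # It is an ldapsubentry carrying the keepalivetimestamp attribute, e.g.:
    # inst.search_s(keep_alive_dn, ldap.SCOPE_BASE,
    #               '(objectclass=ldapsubentry)', ['keepalivetimestamp'])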
diff --git a/SOURCES/0031-do-not-add-referrals-for-masters-with-different-data.patch b/SOURCES/0031-do-not-add-referrals-for-masters-with-different-data.patch
new file mode 100644
index 0000000..80e4bd3
--- /dev/null
+++ b/SOURCES/0031-do-not-add-referrals-for-masters-with-different-data.patch
@@ -0,0 +1,513 @@
+From 9d25d8bc3262bfaeeda2992538f649bf1a1b33de Mon Sep 17 00:00:00 2001
+From: progier389 <72748589+progier389@users.noreply.github.com>
+Date: Thu, 12 Nov 2020 18:50:04 +0100
+Subject: [PATCH 6/8] do not add referrals for masters with different data
+ generation #2054 (#4427)
+
+Bug description:
+The problem is that some operations that are mandatory in the usual cases are
+also performed when replication cannot take place because the
+database sets are different (i.e., the RUV generation ids are different).
+
+One issue is that the csn generator state is updated when
+starting a replication session (it is a problem when trying to
+reset the time skew, as freshly reinstalled replicas get infected
+by the old ones).
+
+A second issue is that the RUV gets updated when ending a replication session
+(which may add a replica that does not share the same data set;
+update operations on the consumer then return referrals towards the wrong masters).
+
+Fix description:
+The fix checks the RUV generation ids before updating the csn generator
+and before updating the RUV.
+
+Reviewed by: mreynolds
+             firstyear
+             vashirov
+
+Platforms tested: F32
+---
+ .../suites/replication/regression_test.py | 290 ++++++++++++++++++
+ ldap/servers/plugins/replication/repl5.h | 1 +
+ .../plugins/replication/repl5_inc_protocol.c | 20 +-
+ .../plugins/replication/repl5_replica.c | 39 ++-
+ src/lib389/lib389/dseldif.py | 37 +++
+ 5 files changed, 368 insertions(+), 19 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
+index 14b9d6a44..a72af6b30 100644
+--- a/dirsrvtests/tests/suites/replication/regression_test.py
++++ b/dirsrvtests/tests/suites/replication/regression_test.py
+@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
+ from lib389.pwpolicy import PwPolicyManager
+ from lib389.utils import *
+ from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
++from lib389.topologies import topology_m2c2 as topo_m2c2
+ from lib389._constants import *
+ from lib389.idm.organizationalunit import OrganizationalUnits
+ from lib389.idm.user import UserAccount
+@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
+ from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
+ from lib389.agreement import Agreements
+ from lib389 import pid_from_file
++from lib389.dseldif import *
+
+
+ pytestmark = pytest.mark.tier1
+@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
+     verify_keepalive_entries(topo_m2, True)
+
+
++def get_agreement(agmts, consumer):
++    # Get the agreement towards consumer among the agreement list
++    for agmt in agmts.list():
++        if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
++            agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
++            return agmt
++    return None
++
++
++def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
++    """Check that the RUV url is not updated if the RUV generation uuids are different
++
++    :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
++    :setup: Two masters + two consumers replication setup
++    :steps:
++        1. Generate ldif without replication data
++        2. Init both masters from that ldif
++           (to clear the ruvs and generate different generation uuids)
++        3. Perform on-line init from master1 to consumer1
++           and from master2 to consumer2
++        4. Perform update on both masters
++        5. Check that the c1 RUV does not contain a URL towards m2
++        6. Check that the c2 RUV does contain a URL towards m2
++        7. Perform on-line init from master1 to master2
++        8. Perform update on master2
++        9. Check that the c1 RUV does contain a URL towards m2
++    :expectedresults:
++        1. No error while generating ldif
++        2. No error while importing the ldif file
++        3. No error and initialization done.
++        4. No error
++        5. master2's replicaid should not be in the consumer1 RUV
++        6. master2's replicaid should be in the consumer2 RUV
++        7. No error and initialization done.
++        8. No error
++        9. master2's replicaid should be in the consumer1 RUV
++
++    """
++
++    # Variables initialization
++    repl = ReplicationManager(DEFAULT_SUFFIX)
++
++    m1 = topo_m2c2.ms["master1"]
++    m2 = topo_m2c2.ms["master2"]
++    c1 = topo_m2c2.cs["consumer1"]
++    c2 = topo_m2c2.cs["consumer2"]
++
++    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
++    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
++    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
++    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
++
++    replicid_m2 = replica_m2.get_rid()
++
++    agmts_m1 = Agreements(m1, replica_m1.dn)
++    agmts_m2 = Agreements(m2, replica_m2.dn)
++
++    m1_m2 = get_agreement(agmts_m1, m2)
++    m1_c1 = get_agreement(agmts_m1, c1)
++    m1_c2 = get_agreement(agmts_m1, c2)
++    m2_m1 = get_agreement(agmts_m2, m1)
++    m2_c1 = get_agreement(agmts_m2, c1)
++    m2_c2 = get_agreement(agmts_m2, c2)
++
++    # Step 1: Generate ldif without replication data
++    m1.stop()
++    m2.stop()
++    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
++    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
++               excludeSuffixes=None, repl_data=False,
++               outputfile=ldif_file, encrypt=False)
++    # Remove replication metadata that is still in the ldif
++    # _remove_replication_data(ldif_file)
++
++    # Step 2: Init both masters from that ldif
++    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
++    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
++    m1.start()
++    m2.start()
++
++    # Step 3: Perform on-line init from master1 to consumer1
++    #         and from master2 to consumer2
++    m1_c1.begin_reinit()
++    m2_c2.begin_reinit()
++    (done, error) = m1_c1.wait_reinit()
++    assert done is True
++    assert error is False
++    (done, error) = m2_c2.wait_reinit()
++    assert done is True
++    assert error is False
++
++    # Step 4: Perform update on both masters
++    repl.test_replication(m1, c1)
++    repl.test_replication(m2, c2)
++
++    # Step 5: Check that the c1 RUV does not contain a URL towards m2
++    ruv = replica_c1.get_ruv()
++    log.debug(f"c1 RUV: {ruv}")
++    url = ruv._rid_url.get(replica_m2.get_rid())
++    if (url == None):
++        log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV")
++    else:
++        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
++        log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
++        # Note: this assertion fails if issue 2054 is not fixed.
++        assert False
++
++    # Step 6: Check that the c2 RUV does contain a URL towards m2
++    ruv = replica_c2.get_ruv()
++    log.debug(f"c2 RUV: {ruv} {ruv._rids} ")
++    url = ruv._rid_url.get(replica_m2.get_rid())
++    if (url == None):
++        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
++        assert False
++    else:
++        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
++
++
++    # Step 7: Perform on-line init from master1 to master2
++    m1_m2.begin_reinit()
++    (done, error) = m1_m2.wait_reinit()
++    assert done is True
++    assert error is False
++
++    # Step 8: Perform update on master2
++    repl.test_replication(m2, c1)
++
++    # Step 9: Check that the c1 RUV does contain a URL towards m2
++    ruv = replica_c1.get_ruv()
++    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
++    url = ruv._rid_url.get(replica_m2.get_rid())
++    if (url == None):
++        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
++        assert False
++    else:
++        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
++
++
++def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
++    """Check that the csngen remote offset is not updated if the RUV generation uuids are different
++
++    :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
++    :setup: Two masters + two consumers replication setup
++    :steps:
++        1. Disable the m1<->m2 agreement to avoid propagating timeSkew
++        2. Generate ldif without replication data
++        3. Increase time skew on master2
++        4. Init both masters from that ldif
++           (to clear the ruvs and generate different generation uuids)
++        5. Perform on-line init from master1 to consumer1 and from master2 to consumer2
++        6. Perform update on both masters
++        7. Check that c1 has no time skew
++        8. Check that c2 has time skew
++        9. Init master2 from master1
++        10. Perform update on master2
++        11. Check that c1 has time skew
++    :expectedresults:
++        1. No error
++        2. No error while generating ldif
++        3. No error
++        4. No error while importing the ldif file
++        5. No error and initialization done.
++        6. No error
++        7. c1 time skew should be lower than the threshold
++        8. c2 time skew should be higher than the threshold
++        9. No error and initialization done.
++        10. No error
++        11. c1 time skew should be higher than the threshold
++
++    """
++
++    # Variables initialization
++    repl = ReplicationManager(DEFAULT_SUFFIX)
++
++    m1 = topo_m2c2.ms["master1"]
++    m2 = topo_m2c2.ms["master2"]
++    c1 = topo_m2c2.cs["consumer1"]
++    c2 = topo_m2c2.cs["consumer2"]
++
++    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
++    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
++    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
++    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
++
++    replicid_m2 = replica_m2.get_rid()
++
++    agmts_m1 = Agreements(m1, replica_m1.dn)
++    agmts_m2 = Agreements(m2, replica_m2.dn)
++
++    m1_m2 = get_agreement(agmts_m1, m2)
++    m1_c1 = get_agreement(agmts_m1, c1)
++    m1_c2 = get_agreement(agmts_m1, c2)
++    m2_m1 = get_agreement(agmts_m2, m1)
++    m2_c1 = get_agreement(agmts_m2, c1)
++    m2_c2 = get_agreement(agmts_m2, c2)
++
++    # Step 1: Disable the m1<->m2 agreement to avoid propagating timeSkew
++    m1_m2.pause()
++    m2_m1.pause()
++
++    # Step 2: Generate ldif without replication data
++    m1.stop()
++    m2.stop()
++    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
++    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
++               excludeSuffixes=None, repl_data=False,
++               outputfile=ldif_file, encrypt=False)
++    # Remove replication metadata that is still in the ldif
++    # _remove_replication_data(ldif_file)
++
++    # Step 3: Increase time skew on master2
++    timeSkew = 6 * 3600
++    # We can modify master2's time skew,
++    # but the time skew on the consumer may be smaller
++    # depending on when the csngen generation time is updated
++    # and when the first csn gets replicated.
++    # Since we use timeSkew as the threshold value to detect
++    # whether there is time skew or not,
++    # let's add a significant margin (longer than the test duration)
++    # to avoid any risk of erroneous failure
++    timeSkewMargin = 300
++    DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew + timeSkewMargin)
++
++    # Step 4: Init both masters from that ldif
++    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
++    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
++    m1.start()
++    m2.start()
++
++    # Step 5: Perform on-line init from master1 to consumer1
++    #         and from master2 to consumer2
++    m1_c1.begin_reinit()
++    m2_c2.begin_reinit()
++    (done, error) = m1_c1.wait_reinit()
++    assert done is True
++    assert error is False
++    (done, error) = m2_c2.wait_reinit()
++    assert done is True
++    assert error is False
++
++    # Step 6: Perform update on both masters
++    repl.test_replication(m1, c1)
++    repl.test_replication(m2, c2)
++
++    # Step 7: Check that c1 has no time skew
++    # Stop the server to ensure that dse.ldif is up to date
++    c1.stop()
++    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
++    c1_timeSkew = int(c1_nsState['time_skew'])
++    log.debug(f"c1 time skew: {c1_timeSkew}")
++    if (c1_timeSkew >= timeSkew):
++        log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}")
++        assert False
++    c1.start()
++
++    # Step 8: Check that c2 has time skew
++    # Stop the server to ensure that dse.ldif is up to date
++    c2.stop()
++    c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
++    c2_timeSkew = int(c2_nsState['time_skew'])
++    log.debug(f"c2 time skew: {c2_timeSkew}")
++    if (c2_timeSkew < timeSkew):
++        log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}")
++        assert False
++    c2.start()
++
++    # Step 9: Perform on-line init from master1 to master2
++    m1_c1.pause()
++    m1_m2.resume()
++    m1_m2.begin_reinit()
++    (done, error) = m1_m2.wait_reinit()
++    assert done is True
++    assert error is False
++
++    # Step 10: Perform update on master2
++    repl.test_replication(m2, c1)
++
++    # Step 11: Check that c1 has time skew
++    # Stop the server to ensure that dse.ldif is up to date
++    c1.stop()
++    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
++    c1_timeSkew = int(c1_nsState['time_skew'])
++    log.debug(f"c1 time skew: {c1_timeSkew}")
++    if (c1_timeSkew < timeSkew):
++        log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}")
++        assert False
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
+index 638471744..b2605011a 100644
+--- a/ldap/servers/plugins/replication/repl5.h
++++ b/ldap/servers/plugins/replication/repl5.h
+@@ -698,6 +698,7 @@ void replica_dump(Replica *r);
+ void replica_set_enabled(Replica *r, PRBool enable);
+ Replica *replica_get_replica_from_dn(const Slapi_DN *dn);
+ Replica *replica_get_replica_from_root(const char *repl_root);
++int replica_check_generation(Replica *r, const RUV *remote_ruv);
+ int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl);
+ Replica *replica_get_replica_for_op(Slapi_PBlock *pb);
+ /* the functions below manipulate replica hash */
+diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
+index 29b1fb073..af5e5897c 100644
+--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
++++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
+@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv)
+     } else if (NULL == remote_ruv) {
+         return_value = EXAMINE_RUV_PRISTINE_REPLICA;
+     } else {
+-        char *local_gen = NULL;
+-        char *remote_gen = ruv_get_replica_generation(remote_ruv);
+-        Object *local_ruv_obj;
+-        RUV *local_ruv;
+-
+         PR_ASSERT(NULL != prp->replica);
+-        local_ruv_obj = replica_get_ruv(prp->replica);
+-        if (NULL != local_ruv_obj) {
+-            local_ruv = (RUV *)object_get_data(local_ruv_obj);
+-            PR_ASSERT(local_ruv);
+-            local_gen = ruv_get_replica_generation(local_ruv);
+-            object_release(local_ruv_obj);
+-        }
+-        if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
+-            return_value = EXAMINE_RUV_GENERATION_MISMATCH;
+-        } else {
++        if (replica_check_generation(prp->replica, remote_ruv)) {
+             return_value = EXAMINE_RUV_OK;
++        } else {
++            return_value = EXAMINE_RUV_GENERATION_MISMATCH;
+         }
+-        slapi_ch_free((void **)&remote_gen);
+-        slapi_ch_free((void **)&local_gen);
+     }
+     return return_value;
+ }
+diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
+index f0ea0f8ef..7e56d6557 100644
+--- a/ldap/servers/plugins/replication/repl5_replica.c
++++ b/ldap/servers/plugins/replication/repl5_replica.c
+@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv)
+     replica_unlock(r->repl_lock);
+ }
+
++/*
++ * Check if the replica generation is the same as the remote ruv one
++ */
++int
++replica_check_generation(Replica *r, const RUV *remote_ruv)
++{
++    int return_value;
++    char *local_gen = NULL;
++    char *remote_gen = ruv_get_replica_generation(remote_ruv);
++    Object *local_ruv_obj;
++    RUV *local_ruv;
++
++    PR_ASSERT(NULL != r);
++    local_ruv_obj = replica_get_ruv(r);
++    if (NULL != local_ruv_obj) {
++        local_ruv = (RUV *)object_get_data(local_ruv_obj);
++        PR_ASSERT(local_ruv);
++        local_gen = ruv_get_replica_generation(local_ruv);
++        object_release(local_ruv_obj);
++    }
++    if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
++        return_value = PR_FALSE;
++    } else {
++        return_value = PR_TRUE;
++    }
++    slapi_ch_free_string(&remote_gen);
++    slapi_ch_free_string(&local_gen);
++    return return_value;
++}
++
+ /*
+  * Update one particular CSN in an RUV. This is meant to be called
+  * whenever (a) the server has processed a client operation and
+@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
+
+     PR_ASSERT(r && ruv);
+
++    if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
++    {
++        return 0;
++    }
++
+     rc = ruv_get_max_csn(ruv, &csn);
+     if (rc != RUV_SUCCESS) {
+         return -1;
+@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
+     replica_lock(r->repl_lock);
+
+     local_ruv = (RUV *)object_get_data(r->repl_ruv);
+-
+-    if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
++    if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
++        !replica_check_generation(r, supplier_ruv)) {
+         replica_unlock(r->repl_lock);
+         return;
+     }
+diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
+index f2725add9..6e6be7cd2 100644
+--- a/src/lib389/lib389/dseldif.py
++++ b/src/lib389/lib389/dseldif.py
+@@ -316,6 +316,43 @@ class DSEldif(DSLint):
+
+         return states
+
++    def _increaseTimeSkew(self, suffix, timeSkew):
++        # Increase csngen state local_offset by timeSkew
++        # Warning: the instance must be stopped before calling this function
++        assert (timeSkew >= 0)
++        nsState = self.readNsState(suffix)[0]
++        self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
++        oldNsState = self.get(nsState['dn'], 'nsState', True)
++        self._instance.log.debug(f'oldNsState is {oldNsState}')
++
++        # Let's re-encode the new nsState
++        from lib389.utils import print_nice_time
++        if pack('<h', 1) == pack('=h', 1):
++            end = '<'
++        elif pack('>h', 1) == pack('=h', 1):
++            end = '>'
++        else:
++            raise ValueError("Unknown endian, unable to proceed")
++
++        thelen = len(oldNsState)
++        if thelen <= 20:
++            pad = 2  # padding for short H values
++            timefmt = 'I'  # timevals are unsigned 32-bit int
++        else:
++            pad = 6  # padding for short H values
++            timefmt = 'Q'  # timevals are unsigned 64-bit int
++        fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
++        newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
++                                           int(nsState['gen_time']), int(nsState['local_offset']) + timeSkew,
++                                           int(nsState['remote_offset']), int(nsState['seq_num'])))
++        newNsState = newNsState.decode('utf-8')
++        self._instance.log.debug(f'newNsState is {newNsState}')
++        # Let's replace the value.
++        (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
++        attr_i = next(iter(attr_data))
++        self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
++        self._update()
++
+
+ class FSChecks(DSLint):
+     """This is for the healthcheck feature, check commonly used system config files the
+--
+2.26.2
+
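The _increaseTimeSkew() helper above rewrites the binary-packed nsState blob. For the 64-bit case (pad=6, timefmt='Q') the layout is: rid, padding, three 64-bit time values (gen_time, local_offset, remote_offset), seq_num, padding. A minimal sketch of the encoding it performs (little-endian assumed; the values are illustrative):

    import base64
    from struct import pack

    end, pad, timefmt = '<', 6, 'Q'
    fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
    # -> '<H6x3QH6x': rid, pad, gen_time, local_offset, remote_offset, seq_num, pad
    blob = pack(fmtstr, 1, 1604400000, 6 * 3600 + 300, 0, 42)
    print(base64.b64encode(blob).decode('utf-8'))  # the new nsState:: value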
diff --git a/SOURCES/0032-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch b/SOURCES/0032-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
new file mode 100644
index 0000000..9b30355
--- /dev/null
+++ b/SOURCES/0032-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
@@ -0,0 +1,159 @@
+From 0b0147cdaad0f1fc54451c23b6e5d70da178736f Mon Sep 17 00:00:00 2001
+From: Mark Reynolds
+Date: Wed, 11 Nov 2020 08:59:18 -0500
+Subject: [PATCH 7/8] Issue 4383 - Do not normalize escaped spaces in a DN
+
+Bug Description:  Adding an entry with an escaped leading space leads to many
+                  problems. Mainly, id2entry can get corrupted during an
+                  import of such an entry, and the entryrdn index is not
+                  updated correctly.
+
+Fix Description:  In slapi_dn_normalize_ext() leave an escaped space intact.
+
+Relates: https://github.com/389ds/389-ds-base/issues/4383
+
+Reviewed by: firstyear, progier, and tbordaz (Thanks!!!)
+---
+ .../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++-
+ ldap/servers/slapd/dn.c | 8 +-
+ 2 files changed, 77 insertions(+), 6 deletions(-)
+
+diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
+index 543718689..7939a99a7 100644
+--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
++++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
+@@ -1,5 +1,5 @@
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2019 Red Hat, Inc.
++# Copyright (C) 2020 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -7,13 +7,12 @@
+ # --- END COPYRIGHT BLOCK ---
+
+ import ldap
+-import logging
+ import pytest
+ import os
+ from lib389.schema import Schema
+ from lib389.config import Config
+ from lib389.idm.user import UserAccounts
+-from lib389.idm.group import Groups
++from lib389.idm.group import Group, Groups
+ from lib389._constants import DEFAULT_SUFFIX
+ from lib389.topologies import log, topology_st as topo
+
+@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo):
+         4. Success
+     """
+
+-    # Create group 
++    # Create group
+     groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+     group = groups.create(properties={'cn': ' test'})
+
+@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo):
+     groups.list()
+
+
++@pytest.mark.parametrize("props, rawdn", [
++                         ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"),
++                         ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")])
++def test_dn_syntax_spaces_delete(topo, props, rawdn):
++    """Test that an entry with a space as the first character in the DN can be
++    deleted without error. We also want to make sure the indexes are properly
++    updated by repeatedly adding and deleting the entry, and that the entry cache
++    is properly maintained.
++
++    :id: b993f37c-c2b0-4312-992c-a9048ff98965
++    :parametrized: yes
++    :setup: Standalone Instance
++    :steps:
++        1. Create a group with a DN that has a space as the first/last
++           character.
++        2. Delete group
++        3. Add group
++        4. Modify group
++        5. Restart server and modify entry
++        6. Delete group
++        7. Add group back
++        8. Delete group using specific DN
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Success
++        4. Success
++        5. Success
++        6. Success
++        7. Success
++        8. Success
++    """
++
++    # Create group
++    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
++    group = groups.create(properties=props.copy())
++
++    # Delete group (verifies DN/RDN parsing works and cache is correct)
++    group.delete()
++
++    # Add group again (verifies entryrdn index was properly updated)
++    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
++    group = groups.create(properties=props.copy())
++
++    # Modify the group (verifies dn/rdn parsing is correct)
++    group.replace('description', 'escaped space group')
++
++    # Restart the server. This will pull the entry from the database and
++    # convert it into a cache entry, which is different from how a client
++    # first adds an entry that is put into the cache before being written to
++    # disk.
++    topo.standalone.restart()
++
++    # Make sure we can modify the entry (verifies cache entry was created
++    # correctly)
++    group.replace('description', 'escaped space group after restart')
++
++    # Make sure it can still be deleted (verifies cache again).
++    group.delete()
++
++    # Add it back so we can delete it using a specific DN (sanity test to verify
++    # another DN/RDN parsing variation).
++    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
++    group = groups.create(properties=props.copy())
++    group = Group(topo.standalone, dn=rawdn)
++    group.delete()
++
++
+ if __name__ == '__main__':
+     # Run isolated
+     # -s for DEBUG mode
+diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
+index 2af3f38fc..3980b897f 100644
+--- a/ldap/servers/slapd/dn.c
++++ b/ldap/servers/slapd/dn.c
+@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
+                     s++;
+                 }
+             }
+-        } else if (s + 2 < ends &&
+-                   isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
++        } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
+             /* esc hexpair ==> real character */
+             int n = slapi_hexchar2int(*(s + 1));
+             int n2 = slapi_hexchar2int(*(s + 2));
+             if (n == 0) { /* don't change \00 */
+                 *d++ = *++s;
+                 *d++ = *++s;
++            } else if (n == 32) { /* leave \20 (space) intact */
++                *d++ = *s;
++                *d++ = *++s;
++                *d++ = *++s;
++                s++;
+             } else {
+                 *d++ = n;
+                 s += 3;
+--
+2.26.2
+
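The escaped-space DNs exercised by patch 0032 can also be produced with python-ldap, whose ldap.dn.escape_dn_chars() escapes a leading or trailing space with a backslash ('\ ' and '\20' are equivalent RFC 4514 escapes). A minimal sketch (the values are illustrative):

    from ldap.dn import escape_dn_chars

    dn = 'cn=%s,ou=Groups,dc=example,dc=com' % escape_dn_chars(' leadingSpace')
    print(dn)  # cn=\ leadingSpace,ou=Groups,dc=example,dc=com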
++# --- END COPYRIGHT BLOCK --- ++# ++import ldap ++import logging ++import pytest ++from lib389._constants import DEFAULT_SUFFIX ++from lib389.config import Config ++from lib389.plugins import USNPlugin, MemberOfPlugin ++from lib389.idm.group import Groups ++from lib389.idm.user import UserAccounts ++from lib389.idm.organizationalunit import OrganizationalUnit ++from lib389.tombstone import Tombstones ++from lib389.rootdse import RootDSE ++from lib389.topologies import topology_st, topology_m2 ++ ++log = logging.getLogger(__name__) ++ ++USER_NUM = 10 ++GROUP_NUM = 3 ++ ++ ++def check_entryusn_no_duplicates(entryusn_list): ++ """Check that all values in the list are unique""" ++ ++ if len(entryusn_list) > len(set(entryusn_list)): ++ raise AssertionError(f"EntryUSN values have duplicates, please, check logs") ++ ++ ++def check_lastusn_after_restart(inst): ++ """Check that last usn is the same after restart""" ++ ++ root_dse = RootDSE(inst) ++ last_usn_before = root_dse.get_attr_val_int("lastusn;userroot") ++ inst.restart() ++ last_usn_after = root_dse.get_attr_val_int("lastusn;userroot") ++ assert last_usn_after == last_usn_before ++ ++ ++@pytest.fixture(scope="module") ++def setup(topology_st, request): ++ """ ++ Enable USN plug-in ++ Enable MEMBEROF plugin ++ Add test entries ++ """ ++ ++ inst = topology_st.standalone ++ ++ log.info("Enable the USN plugin...") ++ plugin = USNPlugin(inst) ++ plugin.enable() ++ ++ log.info("Enable the MEMBEROF plugin...") ++ plugin = MemberOfPlugin(inst) ++ plugin.enable() ++ ++ inst.restart() ++ ++ users_list = [] ++ log.info("Adding test entries...") ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ for id in range(USER_NUM): ++ user = users.create_test_user(uid=id) ++ users_list.append(user) ++ ++ groups_list = [] ++ log.info("Adding test groups...") ++ groups = Groups(inst, DEFAULT_SUFFIX) ++ for id in range(GROUP_NUM): ++ group = groups.create(properties={'cn': f'test_group{id}'}) ++ groups_list.append(group) ++ ++ def fin(): ++ for user in users_list: ++ try: ++ user.delete() ++ except ldap.NO_SUCH_OBJECT: ++ pass ++ for group in groups_list: ++ try: ++ group.delete() ++ except ldap.NO_SUCH_OBJECT: ++ pass ++ request.addfinalizer(fin) ++ ++ return {"users": users_list, ++ "groups": groups_list} ++ ++ ++def test_entryusn_no_duplicates(topology_st, setup): ++ """Verify that entryUSN is not duplicated after memberOf operation ++ ++ :id: 1a7d382d-1214-4d56-b9c2-9c4ed57d1683 ++ :setup: Standalone instance, Groups and Users, USN and memberOf are enabled ++ :steps: ++ 1. Add a member to group 1 ++ 2. Add a member to group 1 and 2 ++ 3. Check that entryUSNs are different ++ 4. Check that lastusn before and after a restart are the same ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
Success ++ """ ++ ++ inst = topology_st.standalone ++ config = Config(inst) ++ config.replace('nsslapd-accesslog-level', '260') # Internal op ++ config.replace('nsslapd-errorlog-level', '65536') ++ config.replace('nsslapd-plugin-logging', 'on') ++ entryusn_list = [] ++ ++ users = setup["users"] ++ groups = setup["groups"] ++ ++ groups[0].replace('member', users[0].dn) ++ entryusn_list.append(users[0].get_attr_val_int('entryusn')) ++ log.info(f"{users[0].dn}_1: {entryusn_list[-1:]}") ++ entryusn_list.append(groups[0].get_attr_val_int('entryusn')) ++ log.info(f"{groups[0].dn}_1: {entryusn_list[-1:]}") ++ check_entryusn_no_duplicates(entryusn_list) ++ ++ groups[1].replace('member', [users[0].dn, users[1].dn]) ++ entryusn_list.append(users[0].get_attr_val_int('entryusn')) ++ log.info(f"{users[0].dn}_2: {entryusn_list[-1:]}") ++ entryusn_list.append(users[1].get_attr_val_int('entryusn')) ++ log.info(f"{users[1].dn}_2: {entryusn_list[-1:]}") ++ entryusn_list.append(groups[1].get_attr_val_int('entryusn')) ++ log.info(f"{groups[1].dn}_2: {entryusn_list[-1:]}") ++ check_entryusn_no_duplicates(entryusn_list) ++ ++ check_lastusn_after_restart(inst) ++ ++ ++def test_entryusn_is_same_after_failure(topology_st, setup): ++ """Verify that entryUSN is the same after failed operation ++ ++ :id: 1f227533-370a-48c1-b920-9b3b0bcfc32e ++ :setup: Standalone instance, Groups and Users, USN and memberOf are enabled ++ :steps: ++ 1. Get current group's entryUSN value ++ 2. Try to modify the group with an invalid syntax ++ 3. Get new group's entryUSN value and compare with old ++ 4. Check that lastusn before and after a restart are the same ++ :expectedresults: ++ 1. Success ++ 2. Invalid Syntax error ++ 3. Should be the same ++ 4. Success ++ """ ++ ++ inst = topology_st.standalone ++ users = setup["users"] ++ ++ # We need this update so we get the latest USN pointed to our entry ++ users[0].replace('description', 'update') ++ ++ entryusn_before = users[0].get_attr_val_int('entryusn') ++ users[0].replace('description', 'update') ++ try: ++ users[0].replace('uid', 'invalid update') ++ except ldap.NOT_ALLOWED_ON_RDN: ++ pass ++ users[0].replace('description', 'second update') ++ entryusn_after = users[0].get_attr_val_int('entryusn') ++ ++ # entryUSN should be OLD + 2 (only two user updates) ++ assert entryusn_after == (entryusn_before + 2) ++ ++ check_lastusn_after_restart(inst) ++ ++ ++def test_entryusn_after_repl_delete(topology_m2): ++ """Verify that entryUSN is incremented on 1 after delete operation which creates a tombstone ++ ++ :id: 1704cf65-41bc-4347-bdaf-20fc2431b218 ++ :setup: An instance with replication, Users, USN enabled ++ :steps: ++ 1. Try to delete a user ++ 2. Check the tombstone has the incremented USN ++ 3. Try to delete ou=People with users ++ 4. Check the entry has a not incremented entryUSN ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Should fail with Not Allowed On Non-leaf error ++ 4. 
++def test_entryusn_after_repl_delete(topology_m2):
++    """Verify that entryUSN is incremented by 1 after a delete operation that creates a tombstone
++
++    :id: 1704cf65-41bc-4347-bdaf-20fc2431b218
++    :setup: An instance with replication, users, USN enabled
++    :steps:
++        1. Delete a user
++        2. Check that the tombstone has the incremented entryUSN
++        3. Try to delete ou=People while it still contains users
++        4. Check that the entry's entryUSN is unchanged
++    :expectedresults:
++        1. Success
++        2. Success
++        3. Should fail with a Not Allowed On Non-leaf error
++        4. Success
++    """
++
++    inst = topology_m2.ms["master1"]
++    plugin = USNPlugin(inst)
++    plugin.enable()
++    inst.restart()
++    users = UserAccounts(inst, DEFAULT_SUFFIX)
++
++    try:
++        user_1 = users.create_test_user()
++        user_rdn = user_1.rdn
++        tombstones = Tombstones(inst, DEFAULT_SUFFIX)
++
++        user_1.replace('description', 'update_ts')
++        user_usn = user_1.get_attr_val_int('entryusn')
++
++        user_1.delete()
++
++        ts = tombstones.get(user_rdn)
++        ts_usn = ts.get_attr_val_int('entryusn')
++
++        assert (user_usn + 1) == ts_usn
++
++        user_1 = users.create_test_user()
++        org = OrganizationalUnit(inst, f"ou=People,{DEFAULT_SUFFIX}")
++        org.replace('description', 'update_ts')
++        ou_usn_before = org.get_attr_val_int('entryusn')
++        try:
++            org.delete()
++        except ldap.NOT_ALLOWED_ON_NONLEAF:
++            pass
++        ou_usn_after = org.get_attr_val_int('entryusn')
++        assert ou_usn_before == ou_usn_after
++
++    finally:
++        try:
++            user_1.delete()
++        except ldap.NO_SUCH_OBJECT:
++            pass
++
++
++if __name__ == '__main__':
++    # Run isolated
++    # -s for DEBUG mode
++    CURRENT_FILE = os.path.realpath(__file__)
++    pytest.main("-s %s" % CURRENT_FILE)
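The server side of the fix follows. The removed lines in the usn.c hunks below show the old discipline: be_usn_counter was incremented only in the bepostop after a successful operation, so an operation and a nested plugin update (such as a memberOf fixup) could both stamp the same "next" USN before either incremented the counter. The new code inverts this: the preop increments the counter up front and the postop decrements it only when the operation failed. Reduced to a sketch with C11 atomics (hypothetical reserve/release helpers for illustration, not the plugin's code):

    #include <stdint.h>
    #include <stdatomic.h>

    /* Illustration of the optimistic-increment pattern the hunks below
     * apply to be->be_usn_counter: reserve a USN by incrementing first,
     * and give it back only if the operation fails. */
    static atomic_uint_fast64_t next_usn;

    static uint64_t
    reserve_usn(void)
    {
        /* preop: increment first so concurrent writes never share a USN */
        return atomic_fetch_add(&next_usn, 1);
    }

    static void
    release_usn_on_failure(int op_failed)
    {
        /* postop: a failed operation returns the USN it reserved */
        if (op_failed) {
            atomic_fetch_sub(&next_usn, 1);
        }
    }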
+diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c
+index 12ba040c6..f2cc8a62c 100644
+--- a/ldap/servers/plugins/usn/usn.c
++++ b/ldap/servers/plugins/usn/usn.c
+@@ -333,6 +333,12 @@ _usn_add_next_usn(Slapi_Entry *e, Slapi_Backend *be)
+     }
+     slapi_ch_free_string(&usn_berval.bv_val);
+
++    /*
++     * Increment the counter now; it is decremented in the bepostop
++     * if the operation fails.
++     */
++    slapi_counter_increment(be->be_usn_counter);
++
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "<-- _usn_add_next_usn\n");
+
+@@ -370,6 +376,12 @@ _usn_mod_next_usn(LDAPMod ***mods, Slapi_Backend *be)
+
+     *mods = slapi_mods_get_ldapmods_passout(&smods);
+
++    /*
++     * Increment the counter now; it is decremented in the bepostop
++     * if the operation fails.
++     */
++    slapi_counter_increment(be->be_usn_counter);
++
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "<-- _usn_mod_next_usn\n");
+     return LDAP_SUCCESS;
+@@ -420,6 +432,7 @@ usn_betxnpreop_delete(Slapi_PBlock *pb)
+ {
+     Slapi_Entry *e = NULL;
+     Slapi_Backend *be = NULL;
++    int32_t tombstone_incremented = 0;
+     int rc = SLAPI_PLUGIN_SUCCESS;
+
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+@@ -441,7 +454,9 @@ usn_betxnpreop_delete(Slapi_PBlock *pb)
+         goto bail;
+     }
+     _usn_add_next_usn(e, be);
++    tombstone_incremented = 1;
+ bail:
++    slapi_pblock_set(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &tombstone_incremented);
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "<-- usn_betxnpreop_delete\n");
+
+@@ -483,7 +498,7 @@ bail:
+     return rc;
+ }
+
+-/* count up the counter */
++/* count down the counter */
+ static int
+ usn_bepostop(Slapi_PBlock *pb)
+ {
+@@ -493,25 +508,24 @@ usn_bepostop(Slapi_PBlock *pb)
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "--> usn_bepostop\n");
+
+-    /* if op is not successful, don't increment the counter */
++    /* if the op was not successful, decrement the counter; otherwise do nothing */
+     slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
+     if (LDAP_SUCCESS != rc) {
+-        /* no plugin failure */
+-        rc = SLAPI_PLUGIN_SUCCESS;
+-        goto bail;
+-    }
++        slapi_pblock_get(pb, SLAPI_BACKEND, &be);
++        if (NULL == be) {
++            rc = LDAP_PARAM_ERROR;
++            slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
++            rc = SLAPI_PLUGIN_FAILURE;
++            goto bail;
++        }
+
+-    slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+-    if (NULL == be) {
+-        rc = LDAP_PARAM_ERROR;
+-        slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+-        rc = SLAPI_PLUGIN_FAILURE;
+-        goto bail;
++        if (be->be_usn_counter) {
++            slapi_counter_decrement(be->be_usn_counter);
++        }
+     }
+
+-    if (be->be_usn_counter) {
+-        slapi_counter_increment(be->be_usn_counter);
+-    }
++    /* no plugin failure */
++    rc = SLAPI_PLUGIN_SUCCESS;
+ bail:
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "<-- usn_bepostop\n");
+@@ -519,13 +533,14 @@ bail:
+     return rc;
+ }
+
+-/* count up the counter */
++/* count down the counter on a failure or an ignored mod */
+ static int
+ usn_bepostop_modify(Slapi_PBlock *pb)
+ {
+     int rc = SLAPI_PLUGIN_FAILURE;
+     Slapi_Backend *be = NULL;
+     LDAPMod **mods = NULL;
++    int32_t do_decrement = 0;
+     int i;
+
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+@@ -534,9 +549,7 @@ usn_bepostop_modify(Slapi_PBlock *pb)
+     /* if op is not successful, don't increment the counter */
+     slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
+     if (LDAP_SUCCESS != rc) {
+-        /* no plugin failure */
+-        rc = SLAPI_PLUGIN_SUCCESS;
+-        goto bail;
++        do_decrement = 1;
+     }
+
+     slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
+@@ -545,25 +558,29 @@ usn_bepostop_modify(Slapi_PBlock *pb)
+             if (mods[i]->mod_op & LDAP_MOD_IGNORE) {
+                 slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                               "usn_bepostop_modify - MOD_IGNORE detected\n");
+-                goto bail; /* conflict occurred.
+-                              skip incrementing the counter. */
++                do_decrement = 1; /* conflict occurred.
++                                     decrement the counter. */
+             } else {
+                 break;
+             }
+         }
+     }
+
+-    slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+-    if (NULL == be) {
+-        rc = LDAP_PARAM_ERROR;
+-        slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+-        rc = SLAPI_PLUGIN_FAILURE;
+-        goto bail;
++    if (do_decrement) {
++        slapi_pblock_get(pb, SLAPI_BACKEND, &be);
++        if (NULL == be) {
++            rc = LDAP_PARAM_ERROR;
++            slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
++            rc = SLAPI_PLUGIN_FAILURE;
++            goto bail;
++        }
++        if (be->be_usn_counter) {
++            slapi_counter_decrement(be->be_usn_counter);
++        }
+     }
+
+-    if (be->be_usn_counter) {
+-        slapi_counter_increment(be->be_usn_counter);
+-    }
++    /* no plugin failure */
++    rc = SLAPI_PLUGIN_SUCCESS;
+ bail:
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "<-- usn_bepostop_modify\n");
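The delete path, handled in the next hunk, has one extra piece of state: the preop increments the counter only when the delete actually produced a tombstone, so usn_bepostop_delete must know whether that increment happened. The patch threads this through the pblock with the new SLAPI_USN_INCREMENT_FOR_TOMBSTONE parameter. A minimal sketch of the round-trip (error handling elided; illustrative helpers, not the plugin's code):

    #include "slapi-plugin.h"

    /* Sketch: pass a flag from a betxnpreop to the matching bepostop
     * through the operation's pblock, as the delete path below does. */
    static int
    sketch_betxnpreop(Slapi_PBlock *pb)
    {
        int32_t incremented = 1; /* set only if the counter was bumped */
        slapi_pblock_set(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &incremented);
        return SLAPI_PLUGIN_SUCCESS;
    }

    static int
    sketch_bepostop(Slapi_PBlock *pb)
    {
        int32_t incremented = 0;
        slapi_pblock_get(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &incremented);
        if (incremented) {
            /* undo the optimistic increment here (counter decrement) */
        }
        return SLAPI_PLUGIN_SUCCESS;
    }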
+@@ -573,34 +590,38 @@ bail:
+
+ /* count up the counter */
+ /* if the op is delete and the op was not successful, remove preventryusn */
++/* this function is executed at the backend TXN level */
+ static int
+ usn_bepostop_delete(Slapi_PBlock *pb)
+ {
+     int rc = SLAPI_PLUGIN_FAILURE;
+     Slapi_Backend *be = NULL;
++    int32_t tombstone_incremented = 0;
+
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "--> usn_bepostop_delete\n");
+
+-    /* if op is not successful, don't increment the counter */
++    /* if the op was not successful and the counter was incremented for a tombstone, decrement it */
+     slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
+     if (LDAP_SUCCESS != rc) {
+-        /* no plugin failure */
+-        rc = SLAPI_PLUGIN_SUCCESS;
+-        goto bail;
+-    }
++        slapi_pblock_get(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &tombstone_incremented);
++        if (tombstone_incremented) {
++            slapi_pblock_get(pb, SLAPI_BACKEND, &be);
++            if (NULL == be) {
++                rc = LDAP_PARAM_ERROR;
++                slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
++                rc = SLAPI_PLUGIN_FAILURE;
++                goto bail;
++            }
+
+-    slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+-    if (NULL == be) {
+-        rc = LDAP_PARAM_ERROR;
+-        slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+-        rc = SLAPI_PLUGIN_FAILURE;
+-        goto bail;
++            if (be->be_usn_counter) {
++                slapi_counter_decrement(be->be_usn_counter);
++            }
++        }
+     }
+
+-    if (be->be_usn_counter) {
+-        slapi_counter_increment(be->be_usn_counter);
+-    }
++    /* no plugin failure */
++    rc = SLAPI_PLUGIN_SUCCESS;
+ bail:
+     slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+                   "<-- usn_bepostop_delete\n");
+diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
+index cb562e938..454ea9cc3 100644
+--- a/ldap/servers/slapd/pblock.c
++++ b/ldap/servers/slapd/pblock.c
+@@ -2436,7 +2436,7 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
+             (*(char **)value) = NULL;
+         }
+         break;
+- 
++
+     case SLAPI_SEARCH_CTRLS:
+         if (pblock->pb_intop != NULL) {
+             (*(LDAPControl ***)value) = pblock->pb_intop->pb_search_ctrls;
+@@ -2479,6 +2479,14 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
+         }
+         break;
+
++    case SLAPI_USN_INCREMENT_FOR_TOMBSTONE:
++        if (pblock->pb_intop != NULL) {
++            (*(int32_t *)value) = pblock->pb_intop->pb_usn_tombstone_incremented;
++        } else {
++            (*(int32_t *)value) = 0;
++        }
++        break;
++
+     /* ACI Target Check */
+     case SLAPI_ACI_TARGET_CHECK:
+         if (pblock->pb_misc != NULL) {
+@@ -4156,6 +4164,10 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
+         pblock->pb_intop->pb_paged_results_cookie = *(int *)value;
+         break;
+
++    case SLAPI_USN_INCREMENT_FOR_TOMBSTONE:
++        pblock->pb_intop->pb_usn_tombstone_incremented = *((int32_t *)value);
++        break;
++
+     /* ACI Target Check */
+     case SLAPI_ACI_TARGET_CHECK:
+         _pblock_assert_pb_misc(pblock);
+diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h
+index 7ec2f37d6..90498c0b0 100644
+--- a/ldap/servers/slapd/pblock_v3.h
++++ b/ldap/servers/slapd/pblock_v3.h
+@@ -161,6 +161,7 @@ typedef struct _slapi_pblock_intop
+
+     int pb_paged_results_index;  /* stash SLAPI_PAGED_RESULTS_INDEX */
+     int pb_paged_results_cookie; /* stash SLAPI_PAGED_RESULTS_COOKIE */
++    int32_t pb_usn_tombstone_incremented; /* stash SLAPI_USN_INCREMENT_FOR_TOMBSTONE */
+ } slapi_pblock_intop;
+
+ /* Stuff that is rarely used, but still present */
+diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
+index 04c02cf7c..589830bb4 100644
+--- a/ldap/servers/slapd/slapi-plugin.h
++++ b/ldap/servers/slapd/slapi-plugin.h
+@@ -7483,6 +7483,9 @@ typedef enum _slapi_op_note_t {
+ #define SLAPI_PAGED_RESULTS_INDEX 1945
+ #define SLAPI_PAGED_RESULTS_COOKIE 1949
+
++/* USN Plugin flag for tombstone entries */
++#define SLAPI_USN_INCREMENT_FOR_TOMBSTONE 1950
++
+ /* ACI Target Check */
+ #define SLAPI_ACI_TARGET_CHECK 1946
+
+-- 
+2.26.2
+
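One connection worth noting before the packaging changes: the lastusn;userroot value the new tests read from the root DSE is derived from this same per-backend counter, which is why a counter left inflated by a failed operation would be visible to clients. A sketch of the relationship (assuming, as the preop hunks above suggest, that the counter holds the next USN to assign; this is illustrative, not the plugin's actual root-DSE code):

    #include <stdint.h>
    #include "slapi-plugin.h"

    /* Illustrative: when the counter tracks the next value to hand out,
     * the last assigned USN is the counter minus one. Obtaining the
     * backend's counter (be->be_usn_counter) is simplified here. */
    static uint64_t
    sketch_lastusn(Slapi_Counter *usn_counter)
    {
        return slapi_counter_get_value(usn_counter) - 1;
    }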
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index 37554a0..f8decd7 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -45,7 +45,7 @@ ExcludeArch: i686
 Summary: 389 Directory Server (base)
 Name: 389-ds-base
 Version: 1.4.3.8
-Release: %{?relprefix}5%{?prerel}%{?dist}
+Release: %{?relprefix}6%{?prerel}%{?dist}
 License: GPLv3+
 URL: https://www.port389.org
 Group: System Environment/Daemons
@@ -199,6 +199,15 @@ Patch22: 0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patc
 Patch23: 0023-Issue-51086-Fix-instance-name-length-for-interactive.patch
 Patch24: 0024-Issue-51129-SSL-alert-The-value-of-sslVersionMax-TLS.patch
 Patch25: 0025-Issue-50984-Memory-leaks-in-disk-monitoring.patch
+Patch26: 0026-Issue-4297-On-ADD-replication-URP-issue-internal-sea.patch
+Patch27: 0027-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
+Patch28: 0028-Issue-51233-ds-replcheck-crashes-in-offline-mode.patch
+Patch29: 0029-Issue-4429-NULL-dereference-in-revert_cache.patch
+Patch30: 0030-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
+Patch31: 0031-do-not-add-referrals-for-masters-with-different-data.patch
+Patch32: 0032-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
+Patch33: 0033-Issue-49300-entryUSN-is-duplicated-after-memberOf-op.patch
+
 
 %description
 389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -816,6 +825,16 @@ exit 0
 %doc README.md
 
 %changelog
+* Thu Dec 3 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-6
+- Bump version to 1.4.3.8-6
+- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend
+- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32
+- Resolves: Bug 1859228 - do not add referrals for masters with different data generation
+- Resolves: Bug 1859227 - create keep alive entry after on line init
+- Resolves: Bug 1896850 - NULL dereference in revert_cache()
+- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode
+- Resolves: Bug 1898850 - Entries conflict not resolved by replication
+
 * Wed Aug 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-5
 - Bump version to 1.4.3.8-5
 - Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version