From 26521db319aa986405ba417d20be884851226fd5 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Aug 06 2019 11:11:28 +0000 Subject: import 389-ds-base-1.3.9.1-10.el7 --- diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata index d09f29f..df950b5 100644 --- a/.389-ds-base.metadata +++ b/.389-ds-base.metadata @@ -1 +1 @@ -930c13abb2fc444f1dbd0443ed72a5d5b14c48da SOURCES/389-ds-base-1.3.8.4.tar.bz2 +e81bcb4434dd15b1424594dccce432c49e393788 SOURCES/389-ds-base-1.3.9.1.tar.bz2 diff --git a/.gitignore b/.gitignore index 740caa7..0e8f7f2 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/389-ds-base-1.3.8.4.tar.bz2 +SOURCES/389-ds-base-1.3.9.1.tar.bz2 diff --git a/SOURCES/0000-Ticket-49830-Import-fails-if-backend-name-is-default.patch b/SOURCES/0000-Ticket-49830-Import-fails-if-backend-name-is-default.patch deleted file mode 100644 index 6f16723..0000000 --- a/SOURCES/0000-Ticket-49830-Import-fails-if-backend-name-is-default.patch +++ /dev/null @@ -1,190 +0,0 @@ -From da5a1bbb4e4352b8df10c84572441d47217b6c2c Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 6 Jul 2018 11:37:56 -0400 -Subject: [PATCH] Ticket 49830 - Import fails if backend name is "default" - -Bug Description: The server was previously reserving the backend - name "default". If you tried to import on a - backend with this name the import would skip all - child entries - -Fix Description: Change the default backend name to something - obscure, instead of "default". - - Also improved lib389's dbgen to generate the - correct "dc" attribute value in the root node. - -https://pagure.io/389-ds-base/issue/49830 - -Reviewed by: spichugi(Thanks!) 
- -(cherry picked from commit 8fa838a4ffd4d0c15ae51cb21f246bb1f2dea2a1) ---- - .../tests/suites/import/regression_test.py | 46 +++++++++++++++++++ - ldap/servers/slapd/defbackend.c | 4 +- - ldap/servers/slapd/mapping_tree.c | 7 ++- - ldap/servers/slapd/slap.h | 3 ++ - src/lib389/lib389/dbgen.py | 13 +++++- - 5 files changed, 66 insertions(+), 7 deletions(-) - -diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py -index ad51721a1..d83d00323 100644 ---- a/dirsrvtests/tests/suites/import/regression_test.py -+++ b/dirsrvtests/tests/suites/import/regression_test.py -@@ -23,6 +23,52 @@ TEST_SUFFIX1 = "dc=importest1,dc=com" - TEST_BACKEND1 = "importest1" - TEST_SUFFIX2 = "dc=importest2,dc=com" - TEST_BACKEND2 = "importest2" -+TEST_DEFAULT_SUFFIX = "dc=default,dc=com" -+TEST_DEFAULT_NAME = "default" -+ -+ -+def test_import_be_default(topo): -+ """ Create a backend using the name "default". previously this name was -+ used int -+ -+ :id: 8e507beb-e917-4330-8cac-1ff0eee10508 -+ :feature: Import -+ :setup: Standalone instance -+ :steps: -+ 1. Create a test suffix using the be name of "default" -+ 2. Create an ldif for the "default" backend -+ 3. Import ldif -+ 4. Verify all entries were imported -+ :expectedresults: -+ 1. Success -+ 2. Success -+ 3. Success -+ 4. 
Success -+ """ -+ log.info('Adding suffix:{} and backend: {}...'.format(TEST_DEFAULT_SUFFIX, -+ TEST_DEFAULT_NAME)) -+ backends = Backends(topo.standalone) -+ backends.create(properties={BACKEND_SUFFIX: TEST_DEFAULT_SUFFIX, -+ BACKEND_NAME: TEST_DEFAULT_NAME}) -+ -+ log.info('Create LDIF file and import it...') -+ ldif_dir = topo.standalone.get_ldif_dir() -+ ldif_file = os.path.join(ldif_dir, 'default.ldif') -+ dbgen(topo.standalone, 5, ldif_file, TEST_DEFAULT_SUFFIX) -+ -+ log.info('Stopping the server and running offline import...') -+ topo.standalone.stop() -+ assert topo.standalone.ldif2db(TEST_DEFAULT_NAME, None, None, -+ None, ldif_file) -+ topo.standalone.start() -+ -+ log.info('Verifying entry count after import...') -+ entries = topo.standalone.search_s(TEST_DEFAULT_SUFFIX, -+ ldap.SCOPE_SUBTREE, -+ "(objectclass=*)") -+ assert len(entries) > 1 -+ -+ log.info('Test PASSED') - - - def test_del_suffix_import(topo): -diff --git a/ldap/servers/slapd/defbackend.c b/ldap/servers/slapd/defbackend.c -index aa709da87..b0465e297 100644 ---- a/ldap/servers/slapd/defbackend.c -+++ b/ldap/servers/slapd/defbackend.c -@@ -23,8 +23,6 @@ - /* - * ---------------- Macros --------------------------------------------------- - */ --#define DEFBACKEND_TYPE "default" -- - #define DEFBACKEND_OP_NOT_HANDLED 0 - #define DEFBACKEND_OP_HANDLED 1 - -@@ -65,7 +63,7 @@ defbackend_init(void) - /* - * create a new backend - */ -- defbackend_backend = slapi_be_new(DEFBACKEND_TYPE, DEFBACKEND_TYPE, 1 /* Private */, 0 /* Do Not Log Changes */); -+ defbackend_backend = slapi_be_new(DEFBACKEND_TYPE, DEFBACKEND_NAME, 1 /* Private */, 0 /* Do Not Log Changes */); - if ((rc = slapi_pblock_set(pb, SLAPI_BACKEND, defbackend_backend)) != 0) { - errmsg = "slapi_pblock_set SLAPI_BACKEND failed"; - goto cleanup_and_return; -diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c -index 472a2f6aa..834949a67 100644 ---- a/ldap/servers/slapd/mapping_tree.c -+++ 
b/ldap/servers/slapd/mapping_tree.c -@@ -748,7 +748,7 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) - be_names = (char **)slapi_ch_calloc(1, sizeof(char *)); - be_states = (int *)slapi_ch_calloc(1, sizeof(int)); - -- tmp_backend_name = (char *)slapi_ch_strdup("default"); /* "NULL_CONTAINER" */ -+ tmp_backend_name = (char *)slapi_ch_strdup(DEFBACKEND_NAME); /* "NULL_CONTAINER" */ - (be_names)[be_list_count] = tmp_backend_name; - - /* set backend as started by default */ -@@ -2250,7 +2250,10 @@ slapi_mapping_tree_select_all(Slapi_PBlock *pb, Slapi_Backend **be_list, Slapi_E - if (ret != LDAP_SUCCESS) { - /* flag we have problems at least on part of the tree */ - flag_partial_result = 1; -- } else if ((((!slapi_sdn_issuffix(sdn, slapi_mtn_get_dn(node)) && !slapi_sdn_issuffix(slapi_mtn_get_dn(node), sdn))) || ((node_list == mapping_tree_root) && node->mtn_private && (scope != LDAP_SCOPE_BASE))) && (!be || strncmp(be->be_name, "default", 8))) { -+ } else if ((((!slapi_sdn_issuffix(sdn, slapi_mtn_get_dn(node)) && !slapi_sdn_issuffix(slapi_mtn_get_dn(node), sdn))) || -+ ((node_list == mapping_tree_root) && node->mtn_private && (scope != LDAP_SCOPE_BASE))) && -+ (!be || strncmp(be->be_name, DEFBACKEND_NAME, 8))) -+ { - if (be && !be_isdeleted(be)) { - /* wrong backend or referall, ignore it */ - slapi_log_err(SLAPI_LOG_ARGS, "slapi_mapping_tree_select_all", -diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h -index 7378c2d2a..eb97cdcc4 100644 ---- a/ldap/servers/slapd/slap.h -+++ b/ldap/servers/slapd/slap.h -@@ -45,6 +45,9 @@ static char ptokPBE[34] = "Internal (Software) Token "; - #define SLAPD_EXEMODE_DBVERIFY 12 - #define SLAPD_EXEMODE_UPGRADEDNFORMAT 13 - -+#define DEFBACKEND_TYPE "default" -+#define DEFBACKEND_NAME "DirectoryServerDefaultBackend" -+ - #define LDAP_SYSLOG - #include - #define RLIM_TYPE int -diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py -index a0cda9430..68455b480 100644 ---- 
a/src/lib389/lib389/dbgen.py -+++ b/src/lib389/lib389/dbgen.py -@@ -113,8 +113,13 @@ usercertificate;binary:: MIIBvjCCASegAwIBAgIBAjANBgkqhkiG9w0BAQQFADAnMQ8wDQYD - DBGEN_HEADER = """dn: {SUFFIX} - objectClass: top - objectClass: domain -+<<<<<<< HEAD - dc: example - aci: (target=ldap:///{SUFFIX})(targetattr=*)(version 3.0; acl "acl1"; allow(write) userdn = "ldap:///self";) -+======= -+dc: {RDN} -+aci: (target=ldap:///{SUFFIX})(targetattr=*)(version 3.0; acl "acl1"; allow(write) userdn = "ldap:///self";) -+>>>>>>> 8fa838a4f... Ticket 49830 - Import fails if backend name is "default" - aci: (target=ldap:///{SUFFIX})(targetattr=*)(version 3.0; acl "acl2"; allow(write) groupdn = "ldap:///cn=Directory Administrators, {SUFFIX}";) - aci: (target=ldap:///{SUFFIX})(targetattr=*)(version 3.0; acl "acl3"; allow(read, search, compare) userdn = "ldap:///anyone";) - -@@ -145,7 +150,7 @@ ou: Payroll - - """ - --def dbgen(instance, number, ldif_file, suffix): -+def dbgen(instance, number, ldif_file, suffix, pseudol10n=False): - familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames') - givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames') - familynames = [] -@@ -156,7 +161,11 @@ def dbgen(instance, number, ldif_file, suffix): - givennames = [n.strip() for n in f] - - with open(ldif_file, 'w') as output: -- output.write(DBGEN_HEADER.format(SUFFIX=suffix)) -+ rdn = suffix.split(",", 1)[0].split("=", 1)[1] -+ output.write(DBGEN_HEADER.format(SUFFIX=suffix, RDN=rdn)) -+ for ou in DBGEN_OUS: -+ ou = pseudolocalize(ou) if pseudol10n else ou -+ output.write(DBGEN_OU_TEMPLATE.format(SUFFIX=suffix, OU=ou)) - for i in range(0, number): - # Pick a random ou - ou = random.choice(DBGEN_OUS) --- -2.17.1 - diff --git a/SOURCES/0000-Ticket-50236-memberOf-should-be-more-robust.patch b/SOURCES/0000-Ticket-50236-memberOf-should-be-more-robust.patch new file mode 100644 index 0000000..3e67017 --- /dev/null +++ 
b/SOURCES/0000-Ticket-50236-memberOf-should-be-more-robust.patch @@ -0,0 +1,252 @@ +From 933b46f1f434ac7c11e155611c91f21e31c4d6f7 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 21 Feb 2019 16:58:05 -0500 +Subject: [PATCH] Ticket 50236 - memberOf should be more robust + +Bug Description: When doing a modrdn, or any memberOf update, if the entry + already has the memberOf attribute with the same value + the operation is incorrectly rejected. + +Fix Description: If we get an error 20 (type or value exists) return success. + + Also fixed a coding mistake that causes the wrong error + code to be returned. This also required fixing the CI + test to check for the new correct errro code. + +https://pagure.io/389-ds-base/issue/50236 + +Reviewed by: firstyear, spichugi, and tbordaz (Thanks!!!) +--- + .../suites/memberof_plugin/regression_test.py | 48 ++++++++++--------- + ldap/servers/plugins/memberof/memberof.c | 35 ++++++++++---- + 2 files changed, 53 insertions(+), 30 deletions(-) + +diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py +index 2b0c4aec2..9d0ce35ed 100644 +--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py ++++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py +@@ -11,13 +11,12 @@ import pytest + import os + import time + import ldap +-import subprocess + from random import sample + from lib389.utils import ds_is_older, ensure_list_bytes, ensure_bytes, ensure_str + from lib389.topologies import topology_m1h1c1 as topo, topology_st, topology_m2 as topo_m2 + from lib389._constants import * + from lib389.plugins import MemberOfPlugin +-from lib389 import agreement, Entry ++from lib389 import Entry + from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES + from lib389.idm.group import Groups, Group + from lib389.replica import ReplicationManager +@@ -49,7 +48,7 @@ def add_users(topo_m2, users_num, suffix): + users_list = 
[] + users = UserAccounts(topo_m2.ms["master1"], suffix, rdn=None) + log.info('Adding %d users' % users_num) +- for num in sample(range(1000), users_num): ++ for num in sample(list(range(1000)), users_num): + num_ran = int(round(num)) + USER_NAME = 'test%05d' % num_ran + user = users.create(properties={ +@@ -76,8 +75,8 @@ def config_memberof(server): + for ent in ents: + log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ent.dn) + server.agreement.setProperties(agmnt_dn=ents[0].dn, +- properties={RA_FRAC_EXCLUDE:'(objectclass=*) $ EXCLUDE memberOf', +- RA_FRAC_EXCLUDE_TOTAL_UPDATE:'(objectclass=*) $ EXCLUDE '}) ++ properties={RA_FRAC_EXCLUDE: '(objectclass=*) $ EXCLUDE memberOf', ++ RA_FRAC_EXCLUDE_TOTAL_UPDATE: '(objectclass=*) $ EXCLUDE '}) + + + def send_updates_now(server): +@@ -184,14 +183,14 @@ def test_memberof_with_repl(topo): + for i in range(3): + CN = '%s%d' % (GROUP_CN, i) + groups = Groups(M1, SUFFIX) +- testgroup = groups.create(properties={'cn' : CN}) ++ testgroup = groups.create(properties={'cn': CN}) + time.sleep(2) + test_groups.append(testgroup) + + # Step 4 + #Now start testing by adding differnt user to differn group + if not ds_is_older('1.3.7'): +- test_groups[0].remove('objectClass', 'nsMemberOf') ++ test_groups[0].remove('objectClass', 'nsMemberOf') + + member_dn = test_users[0].dn + grp0_dn = test_groups[0].dn +@@ -211,7 +210,7 @@ def test_memberof_with_repl(topo): + # Step 7 + for i in [grp0_dn, grp1_dn]: + for inst in [M1, H1, C1]: +- _find_memberof(inst, member_dn, i) ++ _find_memberof(inst, member_dn, i) + + # Step 8 + for i in [M1, H1, C1]: +@@ -225,7 +224,7 @@ def test_memberof_with_repl(topo): + # For negative testcase, we are using assertionerror + for inst in [M1, H1, C1]: + for i in [grp0_dn, member_dn]: +- with pytest.raises(AssertionError): ++ with pytest.raises(AssertionError): + _find_memberof(inst, i, grp1_dn) + + # Step 11 +@@ -369,7 +368,7 @@ def test_memberof_with_changelog_reset(topo_m2): + 'objectclass': 
ensure_list_bytes(['top', 'groupOfNames'])} + + for user in users_list: +- dic_of_attributes.setdefault('member',[]) ++ dic_of_attributes.setdefault('member', []) + dic_of_attributes['member'].append(user.dn) + + log.info('Adding the test group using async function') +@@ -427,7 +426,7 @@ def rename_entry(server, cn, from_subtree, to_subtree): + server.rename_s(dn, nrdn, newsuperior=to_subtree, delold=0) + + +-def _find_memberof(server, user_dn=None, group_dn=None, find_result=True): ++def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True): + assert (server) + assert (user_dn) + assert (group_dn) +@@ -495,18 +494,19 @@ def test_memberof_group(topology_st): + dn2 = '%s,%s' % ('uid=test_m2', SUBTREE_1) + g1 = '%s,%s' % ('cn=g1', SUBTREE_1) + g2 = '%s,%s' % ('cn=g2', SUBTREE_2) +- _find_memberof(inst, dn1, g1, True) +- _find_memberof(inst, dn2, g1, True) +- _find_memberof(inst, dn1, g2, False) +- _find_memberof(inst, dn2, g2, False) ++ _find_memberof_ext(inst, dn1, g1, True) ++ _find_memberof_ext(inst, dn2, g1, True) ++ _find_memberof_ext(inst, dn1, g2, False) ++ _find_memberof_ext(inst, dn2, g2, False) + + rename_entry(inst, 'cn=g2', SUBTREE_2, SUBTREE_1) + + g2n = '%s,%s' % ('cn=g2-new', SUBTREE_1) +- _find_memberof(inst, dn1, g1, True) +- _find_memberof(inst, dn2, g1, True) +- _find_memberof(inst, dn1, g2n, True) +- _find_memberof(inst, dn2, g2n, True) ++ _find_memberof_ext(inst, dn1, g1, True) ++ _find_memberof_ext(inst, dn2, g1, True) ++ _find_memberof_ext(inst, dn1, g2n, True) ++ _find_memberof_ext(inst, dn2, g2n, True) ++ + + def _config_memberof_entrycache_on_modrdn_failure(server): + +@@ -517,11 +517,13 @@ def _config_memberof_entrycache_on_modrdn_failure(server): + (ldap.MOD_REPLACE, 'memberOfEntryScope', peoplebase.encode()), + (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) + ++ + def _disable_auto_oc_memberof(server): + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + 
server.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsContainer')]) + ++ + @pytest.mark.ds49967 + def test_entrycache_on_modrdn_failure(topology_st): + """This test checks that when a modrdn fails, the destination entry is not returned by a search +@@ -657,7 +659,7 @@ def test_entrycache_on_modrdn_failure(topology_st): + topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) + topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") + assert False +- except ldap.OPERATIONS_ERROR: ++ except ldap.OBJECT_CLASS_VIOLATION: + pass + + # retrieve the entry having the specific description value +@@ -671,9 +673,11 @@ def test_entrycache_on_modrdn_failure(topology_st): + assert ent.dn == group2_dn + assert found + ++ + def _config_memberof_silent_memberof_failure(server): + _config_memberof_entrycache_on_modrdn_failure(server) + ++ + def test_silent_memberof_failure(topology_st): + """This test checks that if during a MODRDN, the memberof plugin fails + then MODRDN also fails +@@ -817,7 +821,7 @@ def test_silent_memberof_failure(topology_st): + topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) + topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") + assert False +- except ldap.OPERATIONS_ERROR: ++ except ldap.OBJECT_CLASS_VIOLATION: + pass + + # Check the those entries have not memberof +@@ -843,7 +847,7 @@ def test_silent_memberof_failure(topology_st): + except ldap.OPERATIONS_ERROR: + pass + +- # Check the those entries have not memberof ++ # Check the those entries do not have memberof + for i in (4, 5): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) +diff --git a/ldap/servers/plugins/memberof/memberof.c 
b/ldap/servers/plugins/memberof/memberof.c +index 329a845a7..b54eb3977 100644 +--- a/ldap/servers/plugins/memberof/memberof.c ++++ b/ldap/servers/plugins/memberof/memberof.c +@@ -919,8 +919,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb) + * entry that is being renamed. */ + for (i = 0; configCopy.groupattrs && configCopy.groupattrs[i]; i++) { + if (0 == slapi_entry_attr_find(post_e, configCopy.groupattrs[i], &attr)) { +- if ((ret = memberof_moddn_attr_list(pb, &configCopy, pre_sdn, +- post_sdn, attr) != 0)) { ++ if ((ret = memberof_moddn_attr_list(pb, &configCopy, pre_sdn, post_sdn, attr)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, + "memberof_postop_modrdn - Update failed for (%s), error (%d)\n", + slapi_sdn_get_dn(pre_sdn), ret); +@@ -1720,12 +1719,32 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o + replace_mod.mod_values = replace_val; + } + rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); +- if (rc == LDAP_NO_SUCH_ATTRIBUTE) { +- /* the memberof values to be replaced do not exist +- * just add the new values */ +- mods[0] = mods[1]; +- mods[1] = NULL; +- rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); ++ if (rc == LDAP_NO_SUCH_ATTRIBUTE || rc == LDAP_TYPE_OR_VALUE_EXISTS) { ++ if (rc == LDAP_TYPE_OR_VALUE_EXISTS) { ++ /* ++ * For some reason the new modrdn value is present, so retry ++ * the delete by itself and ignore the add op by tweaking ++ * the mod array. ++ */ ++ mods[1] = NULL; ++ rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); ++ } else { ++ /* ++ * The memberof value to be replaced does not exist so just ++ * add the new value. Shuffle the mod array to apply only ++ * the add operation. 
++ */ ++ mods[0] = mods[1]; ++ mods[1] = NULL; ++ rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); ++ if (rc == LDAP_TYPE_OR_VALUE_EXISTS) { ++ /* ++ * The entry already has the expected memberOf value, no ++ * problem just return success. ++ */ ++ rc = LDAP_SUCCESS; ++ } ++ } + } + } + } +-- +2.17.2 + diff --git a/SOURCES/0001-Ticket-48818-For-a-replica-bindDNGroup-should-be-fet.patch b/SOURCES/0001-Ticket-48818-For-a-replica-bindDNGroup-should-be-fet.patch deleted file mode 100644 index 4571372..0000000 --- a/SOURCES/0001-Ticket-48818-For-a-replica-bindDNGroup-should-be-fet.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 0ea14f45cbc834e4791fdc393c5a2a042fd08101 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Tue, 10 Jul 2018 12:07:45 +0200 -Subject: [PATCH] Ticket 48818 - For a replica bindDNGroup, should be fetched - the first time it is used not when the replica is started - -Bug Description: - The fetching of the bindDNGroup is working as designed but this ticket is to make it more flexible - - At startup, if the group does not contain the replica_mgr. - No replication session will succeed until bindDnGroupCheckInterval delay. - updatedn_group_last_check is the timestamp of the last fetch. At startup - updatedn_group_last_check is set to the current time. So the next fetch will happen not before - updatedn_group_last_check+bindDnGroupCheckInterval. - - If the groupDn is changed after startup, no incoming replication can happen for the first - bindDnGroupCheckInterval seconds - -Fix Description: - The fix consist to unset updatedn_group_last_check so that the group will be fetch when the first - incoming replication session will happen. - -https://pagure.io/389-ds-base/issue/49818 - -Reviewed by: Mark Reynolds, Simon Spichugi (thanks !!!) 
- -Platforms tested: F27 - -Flag Day: no - -Doc impact: no ---- - ldap/servers/plugins/replication/repl5_replica.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c -index dee20875e..41cad3bf0 100644 ---- a/ldap/servers/plugins/replication/repl5_replica.c -+++ b/ldap/servers/plugins/replication/repl5_replica.c -@@ -2026,7 +2026,7 @@ _replica_init_from_config(Replica *r, Slapi_Entry *e, char *errortext) - /* get replication bind dn groups */ - r->updatedn_groups = replica_updatedn_group_new(e); - r->groupdn_list = replica_groupdn_list_new(r->updatedn_groups); -- r->updatedn_group_last_check = time(NULL); -+ r->updatedn_group_last_check = 0; - /* get groupdn check interval */ - if ((val = slapi_entry_attr_get_charptr(e, attr_replicaBindDnGroupCheckInterval))) { - if (repl_config_valid_num(attr_replicaBindDnGroupCheckInterval, val, -1, INT_MAX, &rc, errormsg, &interval) != 0) { --- -2.17.1 - diff --git a/SOURCES/0001-Ticket-50238-Failed-modrdn-can-corrupt-entry-cache.patch b/SOURCES/0001-Ticket-50238-Failed-modrdn-can-corrupt-entry-cache.patch new file mode 100644 index 0000000..ee050dd --- /dev/null +++ b/SOURCES/0001-Ticket-50238-Failed-modrdn-can-corrupt-entry-cache.patch @@ -0,0 +1,130 @@ +From 2a181763d0cff5f31dc18f3e71f79dd815906c09 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 22 Feb 2019 13:46:48 -0500 +Subject: [PATCH] Ticket 50238 - Failed modrdn can corrupt entry cache + +Bug Description: Under certain conditions (found under IPA) when a backend + transaction plugin fails and causes a modrdn operation to + fail the entry cache no longer contains the original/pre + entry, but instead it has the post modrdn'ed entry with + the original entry's ID + +Fix Description: Upon failure, if the post entry is in the cache, then swap + it out with the original entry. 
+ +https://pagure.io/389-ds-base/issue/50238 + +Reviewed by: firstyear, spichugi, & tboardaz (Thanks!!!) +--- + dirsrvtests/tests/suites/betxns/betxn_test.py | 57 +++++++++++++++++++ + ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 16 ++++-- + 2 files changed, 68 insertions(+), 5 deletions(-) + +diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py +index 175496495..48181a9ea 100644 +--- a/dirsrvtests/tests/suites/betxns/betxn_test.py ++++ b/dirsrvtests/tests/suites/betxns/betxn_test.py +@@ -8,6 +8,7 @@ + # + import pytest + import six ++import ldap + from lib389.tasks import * + from lib389.utils import * + from lib389.topologies import topology_st +@@ -248,6 +249,62 @@ def test_betxn_memberof(topology_st, dynamic_plugins): + log.info('test_betxn_memberof: PASSED') + + ++def test_betxn_modrdn_memberof(topology_st): ++ """Test modrdn operartions and memberOf ++ ++ :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 ++ ++ :setup: Standalone instance ++ ++ :steps: 1. Enable and configure memberOf plugin ++ 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" ++ 3. Create group and user outside of memberOf plugin scope ++ 4. Do modrdn to move group into scope ++ 5. Do modrdn to move group into scope (again) ++ ++ :expectedresults: ++ 1. memberOf plugin plugin should be ON ++ 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" should PASS ++ 3. Creating group and user should PASS ++ 4. Modrdn should fail with objectclass violation ++ 5. 
Second modrdn should also fail with objectclass violation ++ """ ++ ++ peoplebase = 'ou=people,%s' % DEFAULT_SUFFIX ++ memberof = MemberOfPlugin(topology_st.standalone) ++ memberof.enable() ++ memberof.set_autoaddoc('nsContainer') # Bad OC ++ memberof.set('memberOfEntryScope', peoplebase) ++ memberof.set('memberOfAllBackends', 'on') ++ topology_st.standalone.restart() ++ ++ groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) ++ group = groups.create(properties={ ++ 'cn': 'group', ++ }) ++ ++ # Create user and add it to group ++ users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) ++ user = users.create(properties=TEST_USER_PROPERTIES) ++ if not ds_is_older('1.3.7'): ++ user.remove('objectClass', 'nsMemberOf') ++ ++ group.add_member(user.dn) ++ ++ # Attempt modrdn that should fail, but the original entry should stay in the cache ++ with pytest.raises(ldap.OBJECTCLASS_VIOLATION): ++ group.rename('cn=group_to_people', newsuperior=peoplebase) ++ ++ # Should fail, but not with NO_SUCH_OBJECT as the original entry should still be in the cache ++ with pytest.raises(ldap.OBJECTCLASS_VIOLATION): ++ group.rename('cn=group_to_people', newsuperior=peoplebase) ++ ++ # ++ # Done ++ # ++ log.info('test_betxn_modrdn_memberof: PASSED') ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +index 684b040b8..e4d0337d4 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +@@ -1411,14 +1411,20 @@ common_return: + "operation failed, the target entry is cleared from dncache (%s)\n", slapi_entry_get_dn(ec->ep_entry)); + CACHE_REMOVE(&inst->inst_dncache, bdn); + CACHE_RETURN(&inst->inst_dncache, &bdn); ++ /* ++ * If the new/invalid entry (ec) is in the cache, that means we need to ++ * swap it out with the original entry (e) --> to undo the swap that ++ * modrdn_rename_entry_update_indexes() did. 
++ */ ++ if (cache_is_in_cache(&inst->inst_cache, ec)) { ++ if (cache_replace(&inst->inst_cache, ec, e) != 0) { ++ slapi_log_err(SLAPI_LOG_ALERT, "ldbm_back_modrdn", ++ "failed to replace cache entry after error\n"); ++ } ++ } + } + +- /* remove the new entry from the cache if the op failed - +- otherwise, leave it in */ + if (ec && inst) { +- if (retval && cache_is_in_cache(&inst->inst_cache, ec)) { +- CACHE_REMOVE(&inst->inst_cache, ec); +- } + CACHE_RETURN(&inst->inst_cache, &ec); + } + ec = NULL; +-- +2.17.2 + diff --git a/SOURCES/0002-Ticket-49546-Fix-issues-with-MIB-file.patch b/SOURCES/0002-Ticket-49546-Fix-issues-with-MIB-file.patch deleted file mode 100644 index 620cd85..0000000 --- a/SOURCES/0002-Ticket-49546-Fix-issues-with-MIB-file.patch +++ /dev/null @@ -1,178 +0,0 @@ -From 9f1bbff43c3e6ec01e60d35082b21b83a8795dc2 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 12 Jul 2018 10:48:11 -0400 -Subject: [PATCH] Ticket 49546 - Fix issues with MIB file - -Description: Change dsMaxThreadsHit to dsMaxThreadsHits, and set the - proper object type for dsIntIndex - -https://pagure.io/389-ds-base/issue/49546 - -Reviewed by: spichugi & firstyear(Thanks!!) 
- -(cherry picked from commit 6d4caac04be4223971de54d292db82734f6d6a44) ---- - ldap/servers/slapd/agtmmap.c | 2 +- - ldap/servers/slapd/agtmmap.h | 2 +- - ldap/servers/slapd/connection.c | 2 +- - ldap/servers/slapd/slap.h | 2 +- - ldap/servers/slapd/snmp_collator.c | 6 +++--- - ldap/servers/snmp/ldap-agent.c | 4 ++-- - ldap/servers/snmp/ldap-agent.h | 2 +- - ldap/servers/snmp/redhat-directory.mib | 8 ++++---- - 8 files changed, 14 insertions(+), 14 deletions(-) - -diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c -index fbc730db6..352ccefda 100644 ---- a/ldap/servers/slapd/agtmmap.c -+++ b/ldap/servers/slapd/agtmmap.c -@@ -298,7 +298,7 @@ agt_mread_stats(int hdl, struct hdr_stats_t *pHdrInfo, struct ops_stats_t *pDsOp - pDsOpsTbl->dsErrors = pfile_stats->ops_stats.dsErrors; - pDsOpsTbl->dsConnections = pfile_stats->ops_stats.dsConnections; - pDsOpsTbl->dsConnectionsInMaxThreads = pfile_stats->ops_stats.dsConnectionsInMaxThreads; -- pDsOpsTbl->dsMaxThreadsHit = pfile_stats->ops_stats.dsMaxThreadsHit; -+ pDsOpsTbl->dsMaxThreadsHits = pfile_stats->ops_stats.dsMaxThreadsHits; - } - - if (pDsEntTbl != NULL) { -diff --git a/ldap/servers/slapd/agtmmap.h b/ldap/servers/slapd/agtmmap.h -index 2397dad3c..fb27ab2c4 100644 ---- a/ldap/servers/slapd/agtmmap.h -+++ b/ldap/servers/slapd/agtmmap.h -@@ -102,7 +102,7 @@ struct ops_stats_t - uint64_t dsErrors; - uint64_t dsConnections; /* Number of currently connected clients */ - uint64_t dsConnectionSeq; /* Monotonically increasing number bumped on each new conn est */ -- uint64_t dsMaxThreadsHit; /* Number of times a connection hit max threads */ -+ uint64_t dsMaxThreadsHits; /* Number of times a connection hit max threads */ - uint64_t dsConnectionsInMaxThreads; /* current number of connections that are in max threads */ - uint64_t dsBytesRecv; /* Count of bytes read from clients */ - uint64_t dsBytesSent; /* Count of bytes sent to clients */ -diff --git a/ldap/servers/slapd/connection.c 
b/ldap/servers/slapd/connection.c -index 1dbb49f06..188383b97 100644 ---- a/ldap/servers/slapd/connection.c -+++ b/ldap/servers/slapd/connection.c -@@ -1911,7 +1911,7 @@ connection_activity(Connection *conn, int maxthreads) - slapi_counter_increment(max_threads_count); - slapi_counter_increment(conns_in_maxthreads); - slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads); -- slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHit); -+ slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHits); - } - op_stack_obj = connection_get_operation(); - connection_add_operation(conn, op_stack_obj->op); -diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h -index eb97cdcc4..a02792648 100644 ---- a/ldap/servers/slapd/slap.h -+++ b/ldap/servers/slapd/slap.h -@@ -1889,7 +1889,7 @@ struct snmp_ops_tbl_t - Slapi_Counter *dsBytesSent; /* Count of bytes sent to clients */ - Slapi_Counter *dsEntriesReturned; - Slapi_Counter *dsReferralsReturned; -- Slapi_Counter *dsMaxThreadsHit; -+ Slapi_Counter *dsMaxThreadsHits; - Slapi_Counter *dsConnectionsInMaxThreads; - }; - -diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c -index d56379466..1da7ccbb2 100644 ---- a/ldap/servers/slapd/snmp_collator.c -+++ b/ldap/servers/slapd/snmp_collator.c -@@ -122,7 +122,7 @@ snmp_collator_init(void) - g_get_global_snmp_vars()->ops_tbl.dsEntriesReturned = slapi_counter_new(); - g_get_global_snmp_vars()->ops_tbl.dsReferralsReturned = slapi_counter_new(); - g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads = slapi_counter_new(); -- g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHit = slapi_counter_new(); -+ g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHits = slapi_counter_new(); - g_get_global_snmp_vars()->entries_tbl.dsMasterEntries = slapi_counter_new(); - g_get_global_snmp_vars()->entries_tbl.dsCopyEntries = slapi_counter_new(); - g_get_global_snmp_vars()->entries_tbl.dsCacheEntries 
= slapi_counter_new(); -@@ -592,7 +592,7 @@ snmp_update_ops_table(void) - stats->ops_stats.dsConnections = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsConnections); - stats->ops_stats.dsConnectionSeq = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsConnectionSeq); - stats->ops_stats.dsConnectionsInMaxThreads = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads); -- stats->ops_stats.dsMaxThreadsHit = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHit); -+ stats->ops_stats.dsMaxThreadsHits = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHits); - stats->ops_stats.dsBytesRecv = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsBytesRecv); - stats->ops_stats.dsBytesSent = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsBytesSent); - stats->ops_stats.dsEntriesReturned = slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsEntriesReturned); -@@ -738,7 +738,7 @@ snmp_as_entry(Slapi_Entry *e) - add_counter_to_value(e, "Connections", slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsConnections)); - add_counter_to_value(e, "ConnectionSeq", slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsConnectionSeq)); - add_counter_to_value(e, "ConnectionsInMaxThreads", slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads)); -- add_counter_to_value(e, "ConnectionsMaxThreadsCount", slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHit)); -+ add_counter_to_value(e, "ConnectionsMaxThreadsCount", slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHits)); - add_counter_to_value(e, "BytesRecv", slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsBytesRecv)); - add_counter_to_value(e, "BytesSent", slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsBytesSent)); - add_counter_to_value(e, "EntriesReturned", 
slapi_counter_get_value(g_get_global_snmp_vars()->ops_tbl.dsEntriesReturned)); -diff --git a/ldap/servers/snmp/ldap-agent.c b/ldap/servers/snmp/ldap-agent.c -index 4393a8956..bd9b8dd9b 100644 ---- a/ldap/servers/snmp/ldap-agent.c -+++ b/ldap/servers/snmp/ldap-agent.c -@@ -496,8 +496,8 @@ dsOpsTable_get_value(netsnmp_request_info *request, - the_stat = &context->ops_tbl.dsConnectionsInMaxThreads; - break; - -- case COLUMN_DSMAXTHREADSHIT: -- the_stat = &context->ops_tbl.dsMaxThreadsHit; -+ case COLUMN_DSMAXTHREADSHITS: -+ the_stat = &context->ops_tbl.dsMaxThreadsHits; - break; - - default: /* We shouldn't get here */ -diff --git a/ldap/servers/snmp/ldap-agent.h b/ldap/servers/snmp/ldap-agent.h -index 935d3a611..c98e467dd 100644 ---- a/ldap/servers/snmp/ldap-agent.h -+++ b/ldap/servers/snmp/ldap-agent.h -@@ -161,7 +161,7 @@ extern size_t snmptrap_oid_len; - #define COLUMN_DSERRORS 20 - #define COLUMN_DSCONNECTIONS 21 - #define COLUMN_DSCONNECTIONSINMAXTHREADS 22 --#define COLUMN_DSMAXTHREADSHIT 23 -+#define COLUMN_DSMAXTHREADSHITS 23 - #define dsOpsTable_COL_MIN 1 - #define dsOpsTable_COL_MAX 23 - -diff --git a/ldap/servers/snmp/redhat-directory.mib b/ldap/servers/snmp/redhat-directory.mib -index c8608972e..579be8ee4 100644 ---- a/ldap/servers/snmp/redhat-directory.mib -+++ b/ldap/servers/snmp/redhat-directory.mib -@@ -87,7 +87,7 @@ RHDS-MIB DEFINITIONS ::= BEGIN - dsErrors, - dsConnections, - dsConnectionsInMaxThreads, -- dsMaxThreadsHit, -+ dsMaxThreadsHits, - dsMasterEntries, - dsCopyEntries, - dsCacheEntries, -@@ -190,7 +190,7 @@ RHDS-MIB DEFINITIONS ::= BEGIN - Counter64, - dsConnectionsInMaxThreads - Counter64, -- dsMaxThreadsHit -+ dsMaxThreadsHits - Counter64 - - } -@@ -472,7 +472,7 @@ RHDS-MIB DEFINITIONS ::= BEGIN - "Redhat defined 1.2." 
- ::= { dsOpsEntry 22 } - -- dsMaxThreadsHit OBJECT-TYPE -+ dsMaxThreadsHits OBJECT-TYPE - SYNTAX Counter64 - MAX-ACCESS read-only - STATUS current -@@ -596,7 +596,7 @@ RHDS-MIB DEFINITIONS ::= BEGIN - - DsIntEntry ::= SEQUENCE { - dsIntIndex -- INTEGER, -+ Integer32, - dsName - DistinguishedName, - dsTimeOfCreation --- -2.17.1 - diff --git a/SOURCES/0002-Ticket-50232-export-creates-not-importable-ldif-file.patch b/SOURCES/0002-Ticket-50232-export-creates-not-importable-ldif-file.patch new file mode 100644 index 0000000..fd1bde2 --- /dev/null +++ b/SOURCES/0002-Ticket-50232-export-creates-not-importable-ldif-file.patch @@ -0,0 +1,296 @@ +From 70d1336481bd1a36d8b0bdef43a9364c7db58c26 Mon Sep 17 00:00:00 2001 +From: Ludwig Krispenz +Date: Wed, 20 Feb 2019 10:11:15 +0100 +Subject: [PATCH] Ticket 50232 - export creates not importable ldif file + +Bug: If the RUV entry hasa smaller entryid than the suffix entry it will be + exported before the suffix. If that ldif is used for import the RUV entry + is skipped and a new one generated with a different database generation + +Fix: Before exporting the RUV check that the suffix is alread exported, if not + make the RUV entry pending and write it after all othere entries + +Reviewed by: tbordaz, wbrown. Thanks +--- + dirsrvtests/tests/tickets/ticket50232_test.py | 163 ++++++++++++++++++ + ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 58 ++++++- + 2 files changed, 219 insertions(+), 2 deletions(-) + create mode 100644 dirsrvtests/tests/tickets/ticket50232_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket50232_test.py b/dirsrvtests/tests/tickets/ticket50232_test.py +new file mode 100644 +index 000000000..133ed0dfe +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket50232_test.py +@@ -0,0 +1,163 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2016 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK --- ++# ++import logging ++ ++import pytest ++# from lib389.tasks import * ++# from lib389.utils import * ++from lib389.topologies import topology_st ++from lib389.replica import ReplicationManager,Replicas ++ ++from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME ++ ++from lib389.idm.user import UserAccounts ++from lib389.idm.organization import Organization ++from lib389.idm.organizationalunit import OrganizationalUnit ++ ++log = logging.getLogger(__name__) ++ ++NORMAL_SUFFIX = 'o=normal' ++NORMAL_BACKEND_NAME = 'normal' ++REVERSE_SUFFIX = 'o=reverse' ++REVERSE_BACKEND_NAME = 'reverse' ++ ++def _enable_replica(instance, suffix): ++ ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ repl._ensure_changelog(instance) ++ replicas = Replicas(instance) ++ replicas.create(properties={ ++ 'cn': 'replica', ++ 'nsDS5ReplicaRoot': suffix, ++ 'nsDS5ReplicaId': '1', ++ 'nsDS5Flags': '1', ++ 'nsDS5ReplicaType': '3' ++ }) ++ ++def _populate_suffix(instance, suffixname): ++ ++ o = Organization(instance, 'o={}'.format(suffixname)) ++ o.create(properties={ ++ 'o': suffixname, ++ 'description': 'test' ++ }) ++ ou = OrganizationalUnit(instance, 'ou=people,o={}'.format(suffixname)) ++ ou.create(properties={ ++ 'ou': 'people' ++ }) ++ ++def _get_replica_generation(instance, suffix): ++ ++ replicas = Replicas(instance) ++ replica = replicas.get(suffix) ++ ruv = replica.get_ruv() ++ return ruv._data_generation ++ ++def _test_export_import(instance, suffix, backend): ++ ++ before_generation = _get_replica_generation(instance, suffix) ++ ++ instance.stop() ++ instance.db2ldif( ++ bename=backend, ++ suffixes=[suffix], ++ excludeSuffixes=[], ++ encrypt=False, ++ repl_data=True, ++ outputfile="/tmp/output_file", ++ ) ++ instance.ldif2db( ++ bename=None, ++ excludeSuffixes=None, ++ encrypt=False, ++ suffixes=[suffix], ++ import_file="/tmp/output_file", ++ ) ++ instance.start() ++ after_generation = _get_replica_generation(instance, suffix) ++ ++ assert 
(before_generation == after_generation) ++ ++def test_ticket50232_normal(topology_st): ++ """ ++ The fix for ticket 50232 ++ ++ ++ The test sequence is: ++ - create suffix ++ - add suffix entry and some child entries ++ - "normally" done after populating suffix: enable replication ++ - get RUV and database generation ++ - export -r ++ - import ++ - get RUV and database generation ++ - assert database generation has not changed ++ """ ++ ++ log.info('Testing Ticket 50232 - export creates not imprtable ldif file, normal creation order') ++ ++ topology_st.standalone.backend.create(NORMAL_SUFFIX, {BACKEND_NAME: NORMAL_BACKEND_NAME}) ++ topology_st.standalone.mappingtree.create(NORMAL_SUFFIX, bename=NORMAL_BACKEND_NAME, parent=None) ++ ++ _populate_suffix(topology_st.standalone, NORMAL_BACKEND_NAME) ++ ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ repl._ensure_changelog(topology_st.standalone) ++ replicas = Replicas(topology_st.standalone) ++ replicas.create(properties={ ++ 'cn': 'replica', ++ 'nsDS5ReplicaRoot': NORMAL_SUFFIX, ++ 'nsDS5ReplicaId': '1', ++ 'nsDS5Flags': '1', ++ 'nsDS5ReplicaType': '3' ++ }) ++ ++ _test_export_import(topology_st.standalone, NORMAL_SUFFIX, NORMAL_BACKEND_NAME) ++ ++def test_ticket50232_reverse(topology_st): ++ """ ++ The fix for ticket 50232 ++ ++ ++ The test sequence is: ++ - create suffix ++ - enable replication before suffix enztry is added ++ - add suffix entry and some child entries ++ - get RUV and database generation ++ - export -r ++ - import ++ - get RUV and database generation ++ - assert database generation has not changed ++ """ ++ ++ log.info('Testing Ticket 50232 - export creates not imprtable ldif file, normal creation order') ++ ++ # ++ # Setup Replication ++ # ++ log.info('Setting up replication...') ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ # repl.create_first_master(topology_st.standalone) ++ # ++ # enable dynamic plugins, memberof and retro cl plugin ++ # ++ topology_st.standalone.backend.create(REVERSE_SUFFIX, 
{BACKEND_NAME: REVERSE_BACKEND_NAME}) ++ topology_st.standalone.mappingtree.create(REVERSE_SUFFIX, bename=REVERSE_BACKEND_NAME, parent=None) ++ ++ _enable_replica(topology_st.standalone, REVERSE_SUFFIX) ++ ++ _populate_suffix(topology_st.standalone, REVERSE_BACKEND_NAME) ++ ++ _test_export_import(topology_st.standalone, REVERSE_SUFFIX, REVERSE_BACKEND_NAME) ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +index 11c020af0..49fe7cd5d 100644 +--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c ++++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +@@ -1103,6 +1103,7 @@ bail: + * (reunified at last) + */ + #define LDBM2LDIF_BUSY (-2) ++#define RUVRDN SLAPI_ATTR_UNIQUEID "=" RUV_STORAGE_ENTRY_UNIQUEID + int + ldbm_back_ldbm2ldif(Slapi_PBlock *pb) + { +@@ -1111,6 +1112,7 @@ ldbm_back_ldbm2ldif(Slapi_PBlock *pb) + DB *db = NULL; + DBC *dbc = NULL; + struct backentry *ep; ++ struct backentry *pending_ruv = NULL; + DBT key = {0}; + DBT data = {0}; + char *fname = NULL; +@@ -1146,6 +1148,8 @@ ldbm_back_ldbm2ldif(Slapi_PBlock *pb) + static int load_dse = 1; /* We'd like to load dse just once. 
*/ + int server_running; + export_args eargs = {0}; ++ int32_t suffix_written = 0; ++ int32_t skip_ruv = 0; + + slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_ldbm2ldif", "=>\n"); + +@@ -1463,8 +1467,25 @@ ldbm_back_ldbm2ldif(Slapi_PBlock *pb) + } + } + +- if (0 != return_value) ++ if (DB_NOTFOUND == return_value) { ++ /* reached the end of the database, ++ * check if ruv is pending and write it ++ */ ++ if (pending_ruv) { ++ eargs.ep = pending_ruv; ++ eargs.idindex = idindex; ++ eargs.cnt = &cnt; ++ eargs.lastcnt = &lastcnt; ++ rc = export_one_entry(li, inst, &eargs); ++ backentry_free(&pending_ruv); ++ } ++ break; ++ } ++ ++ if (0 != return_value) { ++ /* error reading database */ + break; ++ } + + /* back to internal format */ + temp_id = id_stored_to_internal((char *)key.data); +@@ -1501,7 +1522,30 @@ ldbm_back_ldbm2ldif(Slapi_PBlock *pb) + rc = get_value_from_string((const char *)data.dptr, + LDBM_PARENTID_STR, &pid_str); + if (rc) { +- rc = 0; /* assume this is a suffix */ ++ /* this could be a suffix or the RUV entry. ++ * If it is the ruv and the suffix is not written ++ * keep the ruv and export as last entry. ++ * ++ * The reason for this is that if the RUV entry is in the ++ * ldif before the suffix entry then at an attempt to import ++ * that ldif the RUV entry would be skipped because the parent ++ * does not exist. 
Later a new RUV would be generated with ++ * a different database generation and replication is broken ++ */ ++ if (suffix_written) { ++ /* this must be the RUV, just continue and write it */ ++ rc = 0; ++ } else if (0 == strcasecmp(rdn, RUVRDN)) { ++ /* this is the RUV and the suffix is not yet written ++ * make it pending and continue with next entry ++ */ ++ skip_ruv = 1; ++ rc = 0; ++ } else { ++ /* this has to be the suffix */ ++ suffix_written = 1; ++ rc = 0; ++ } + } else { + pid = (ID)strtol(pid_str, (char **)NULL, 10); + slapi_ch_free_string(&pid_str); +@@ -1614,6 +1658,16 @@ ldbm_back_ldbm2ldif(Slapi_PBlock *pb) + continue; + } + ++ if (skip_ruv) { ++ /* now we keep a copy of the ruv entry ++ * and continue with the next entry ++ */ ++ pending_ruv = ep; ++ ep = NULL; ++ skip_ruv = 0; ++ continue; ++ } ++ + eargs.ep = ep; + eargs.idindex = idindex; + eargs.cnt = &cnt; +-- +2.17.2 + diff --git a/SOURCES/0003-Ticket-49840-ds-replcheck-command-returns-traceback-.patch b/SOURCES/0003-Ticket-49840-ds-replcheck-command-returns-traceback-.patch deleted file mode 100644 index d163168..0000000 --- a/SOURCES/0003-Ticket-49840-ds-replcheck-command-returns-traceback-.patch +++ /dev/null @@ -1,149 +0,0 @@ -From 6361810037bc32c22e3e00a16bc53b34d0b0d610 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Mon, 9 Jul 2018 15:50:09 -0400 -Subject: [PATCH] Ticket 49840 - ds-replcheck command returns traceback errors - against ldif files having garbage content when run in offline mode - -Description: Added a basic check to see if the LDIF files are actually - LDIF files. Also added checks that the database RUV are - present as well. - -https://pagure.io/389-ds-base/issue/49840 - -Reviewed by: spichugi(Thanks!) 
- -(cherry picked from commit 60cb52040704686d9541a2e2eb2765d86cb10af2) ---- - ldap/admin/src/scripts/ds-replcheck | 53 +++++++++++++++++++++++------ - 1 file changed, 43 insertions(+), 10 deletions(-) - -diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck -index 62f911034..5c195f983 100755 ---- a/ldap/admin/src/scripts/ds-replcheck -+++ b/ldap/admin/src/scripts/ds-replcheck -@@ -10,18 +10,19 @@ - # - - import os -+import sys - import re - import time - import ldap - import ldapurl - import argparse - import getpass -- -+from ldif import LDIFRecordList - from ldap.ldapobject import SimpleLDAPObject - from ldap.cidict import cidict - from ldap.controls import SimplePagedResultsControl - --VERSION = "1.3" -+VERSION = "1.4" - RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))' - LDAP = 'ldap' - LDAPS = 'ldaps' -@@ -386,14 +387,17 @@ def ldif_search(LDIF, dn): - return result - - --def get_dns(LDIF, opts): -+def get_dns(LDIF, filename, opts): - ''' Get all the DN's from an LDIF file - ''' - dns = [] - found = False -+ found_ruv = False -+ LDIF.seek(0) - for line in LDIF: - if line.startswith('dn: ') and line[4:].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'): - opts['ruv_dn'] = line[4:].lower().strip() -+ found_ruv = True - elif line.startswith('dn: '): - found = True - dn = line[4:].lower().strip() -@@ -407,6 +411,14 @@ def get_dns(LDIF, opts): - found = False - dns.append(dn) - -+ if not found_ruv: -+ print('Failed to find the database RUV in the LDIF file: ' + filename + ', the LDIF ' + -+ 'file must contain replication state information.') -+ dns = None -+ else: -+ # All good, reset cursor -+ LDIF.seek(0) -+ - return dns - - -@@ -415,6 +427,7 @@ def get_ldif_ruv(LDIF, opts): - ''' - LDIF.seek(0) - result = ldif_search(LDIF, opts['ruv_dn']) -+ LDIF.seek(0) # Reset cursor - return result['entry'].data['nsds50ruv'] - - -@@ -549,6 +562,7 @@ def do_offline_report(opts, 
output_file=None): - rconflicts = [] - rtombstones = 0 - mtombstones = 0 -+ idx = 0 - - # Open LDIF files - try: -@@ -561,12 +575,36 @@ def do_offline_report(opts, output_file=None): - RLDIF = open(opts['rldif'], "r") - except Exception as e: - print('Failed to open Replica LDIF: ' + str(e)) -+ MLDIF.close() -+ return None -+ -+ # Verify LDIF Files -+ try: -+ print("Validating Master ldif file ({})...".format(opts['mldif'])) -+ LDIFRecordList(MLDIF).parse() -+ except ValueError: -+ print('Master LDIF file in invalid, aborting...') -+ MLDIF.close() -+ RLDIF.close() -+ return None -+ try: -+ print("Validating Replica ldif file ({})...".format(opts['rldif'])) -+ LDIFRecordList(RLDIF).parse() -+ except ValueError: -+ print('Replica LDIF file is invalid, aborting...') -+ MLDIF.close() -+ RLDIF.close() - return None - - # Get all the dn's, and entry counts - print ("Gathering all the DN's...") -- master_dns = get_dns(MLDIF, opts) -- replica_dns = get_dns(RLDIF, opts) -+ master_dns = get_dns(MLDIF, opts['mldif'], opts) -+ replica_dns = get_dns(RLDIF, opts['rldif'], opts) -+ if master_dns is None or replica_dns is None: -+ print("Aborting scan...") -+ MLDIF.close() -+ RLDIF.close() -+ sys.exit(1) - m_count = len(master_dns) - r_count = len(replica_dns) - -@@ -575,11 +613,6 @@ def do_offline_report(opts, output_file=None): - opts['master_ruv'] = get_ldif_ruv(MLDIF, opts) - opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts) - -- # Reset the cursors -- idx = 0 -- MLDIF.seek(idx) -- RLDIF.seek(idx) -- - """ Compare the master entries with the replica's. Take our list of dn's from - the master ldif and get that entry( dn) from the master and replica ldif. 
In - this phase we keep keep track of conflict/tombstone counts, and we check for --- -2.17.1 - diff --git a/SOURCES/0003-Ticket-50234-one-level-search-returns-not-matching-e.patch b/SOURCES/0003-Ticket-50234-one-level-search-returns-not-matching-e.patch new file mode 100644 index 0000000..cb36b7a --- /dev/null +++ b/SOURCES/0003-Ticket-50234-one-level-search-returns-not-matching-e.patch @@ -0,0 +1,110 @@ +From 94702aa0f07dcea5b7ebe7886b2fdc9ab0092cf4 Mon Sep 17 00:00:00 2001 +From: Ludwig Krispenz +Date: Thu, 21 Feb 2019 16:54:52 +0100 +Subject: [PATCH] Ticket 50234 - one level search returns not matching entry + +Bug: if in a onelevel search the IDList for the parentid is smaller than the filter + threshold and smaller than the list generated by the search filter + then the intersection is aborted and all children are returned. + +Fix: In the above case we need to set the flag that the filter evaluation + cannot be bypassed + +Reviewed by: William, Thierry. Thanks +--- + dirsrvtests/tests/tickets/ticket50234_test.py | 70 +++++++++++++++++++ + ldap/servers/slapd/back-ldbm/idl_set.c | 1 + + 2 files changed, 71 insertions(+) + create mode 100644 dirsrvtests/tests/tickets/ticket50234_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket50234_test.py b/dirsrvtests/tests/tickets/ticket50234_test.py +new file mode 100644 +index 000000000..c605c4531 +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket50234_test.py +@@ -0,0 +1,70 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2019 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. 
++# --- END COPYRIGHT BLOCK --- ++# ++import logging ++import time ++import ldap ++import pytest ++ ++from lib389.topologies import topology_st ++ ++from lib389._constants import DEFAULT_SUFFIX ++ ++from lib389.idm.user import UserAccount, UserAccounts ++from lib389.idm.organizationalunit import OrganizationalUnit ++ ++log = logging.getLogger(__name__) ++ ++def test_ticket50234(topology_st): ++ """ ++ The fix for ticket 50234 ++ ++ ++ The test sequence is: ++ - create more than 10 entries with objectclass organizational units ou=org{} ++ - add an Account in one of them, eg below ou=org5 ++ - do searches with search base ou=org5 and search filter "objectclass=organizationalunit" ++ - a subtree search should return 1 entry, the base entry ++ - a onelevel search should return no entry ++ """ ++ ++ log.info('Testing Ticket 50234 - onelvel search returns not matching entry') ++ ++ for i in range(1,15): ++ ou = OrganizationalUnit(topology_st.standalone, "ou=Org{},{}".format(i, DEFAULT_SUFFIX)) ++ ou.create(properties={'ou': 'Org'.format(i)}) ++ ++ properties = { ++ 'uid': 'Jeff Vedder', ++ 'cn': 'Jeff Vedder', ++ 'sn': 'user', ++ 'uidNumber': '1000', ++ 'gidNumber': '2000', ++ 'homeDirectory': '/home/' + 'JeffVedder', ++ 'userPassword': 'password' ++ } ++ user = UserAccount(topology_st.standalone, "cn=Jeff Vedder,ou=org5,{}".format(DEFAULT_SUFFIX)) ++ user.create(properties=properties) ++ ++ # in a subtree search the entry used as search base matches the filter and shoul be returned ++ ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_SUBTREE, "(objectclass=organizationalunit)") ++ ++ # in a onelevel search the only child is an useraccount which does not match the filter ++ # no entry should be returned, which would cause getEntry to raise an exception we need to handle ++ found = 1 ++ try: ++ ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_ONELEVEL, "(objectclass=organizationalunit)") ++ except 
ldap.NO_SUCH_OBJECT: ++ found = 0 ++ assert (found == 0) ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/ldap/servers/slapd/back-ldbm/idl_set.c b/ldap/servers/slapd/back-ldbm/idl_set.c +index f9a900f1f..6b6586799 100644 +--- a/ldap/servers/slapd/back-ldbm/idl_set.c ++++ b/ldap/servers/slapd/back-ldbm/idl_set.c +@@ -371,6 +371,7 @@ idl_set_intersect(IDListSet *idl_set, backend *be) + result_list = idl_set->head; + } else if (idl_set->minimum->b_nids <= FILTER_TEST_THRESHOLD) { + result_list = idl_set->minimum; ++ slapi_be_set_flag(be, SLAPI_BE_FLAG_DONT_BYPASS_FILTERTEST); + + /* Free the other IDLs which are not the minimum. */ + IDList *next = NULL; +-- +2.17.2 + diff --git a/SOURCES/0004-Issue-50091-shadowWarning-is-not-generated-if-passwo.patch b/SOURCES/0004-Issue-50091-shadowWarning-is-not-generated-if-passwo.patch new file mode 100644 index 0000000..5740e36 --- /dev/null +++ b/SOURCES/0004-Issue-50091-shadowWarning-is-not-generated-if-passwo.patch @@ -0,0 +1,96 @@ +From c3c7eabb2dae02977c0a7e1e659a0a928f8fa37d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Barbora=20Smejkalov=C3=A1?= +Date: Fri, 4 Jan 2019 09:14:34 +0100 +Subject: [PATCH] Issue 50091 - shadowWarning is not generated if + passwordWarning is lower than 86400 seconds (1 day). + +Description: +Added test case to check if shadowWarning attribute is generated when passwordWarning is set to lower value than 84600 seconds. + +https://pagure.io/389-ds-base/issue/50091 + +Reviewed by: vashirov, amsharma, spichugi, firstyear (Thank you!) 
+--- + .../suites/password/pwdPolicy_warning_test.py | 53 ++++++++++++++++++- + 1 file changed, 51 insertions(+), 2 deletions(-) + +diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py +index 1b955c66e..90b712e9e 100644 +--- a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py ++++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py +@@ -12,14 +12,14 @@ from ldap.controls.ppolicy import PasswordPolicyControl + from lib389.tasks import * + from lib389.utils import * + from lib389.topologies import topology_st +- ++from lib389.idm.user import UserAccounts + from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG, PASSWORD, DN_DM, + HOST_STANDALONE, PORT_STANDALONE, SERVERID_STANDALONE) + from dateutil.parser import parse as dt_parse + import datetime + + CONFIG_ATTR = 'passwordSendExpiringTime' +-USER_DN = 'uid=tuser,{:s}'.format(DEFAULT_SUFFIX) ++USER_DN = 'uid=tuser,{}'.format(DEFAULT_SUFFIX) + USER_PASSWD = 'secret123' + + logging.getLogger(__name__).setLevel(logging.INFO) +@@ -546,6 +546,55 @@ def test_with_local_policy(topology_st, global_policy, local_policy): + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ++@pytest.mark.bz1589144 ++@pytest.mark.ds50091 ++def test_search_shadowWarning_when_passwordWarning_is_lower(topology_st, global_policy): ++ """Test if value shadowWarning is present with global password policy ++ when passwordWarning is set with lower value. ++ ++ :id: c1e82de6-1aa3-42c3-844a-9720172158a3 ++ :setup: Standalone Instance ++ :steps: ++ 1. Bind as Directory Manager ++ 2. Set global password policy ++ 3. Add test user to instance. ++ 4. Modify passwordWarning to have smaller value than 86400 ++ 5. Bind as the new user ++ 6. Search for shadowWarning attribute ++ 7. Rebind as Directory Manager ++ :expectedresults: ++ 1. Binding should be successful ++ 2. Setting password policy should be successful ++ 3. 
Adding test user should be successful ++ 4. Modifying passwordWarning should be successful ++ 5. Binding should be successful ++ 6. Attribute shadowWarning should be found ++ 7. Binding should be successful ++ """ ++ ++ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) ++ ++ log.info("Bind as %s" % DN_DM) ++ assert topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ ++ log.info("Creating test user") ++ testuser = users.create_test_user(1004) ++ testuser.add('objectclass', 'shadowAccount') ++ testuser.set('userPassword', USER_PASSWD) ++ ++ log.info("Setting passwordWarning to smaller value than 86400") ++ assert topology_st.standalone.config.set('passwordWarning', '86399') ++ ++ log.info("Bind as test user") ++ assert topology_st.standalone.simple_bind_s(testuser.dn, USER_PASSWD) ++ ++ log.info("Check if attribute shadowWarning is present") ++ assert testuser.present('shadowWarning') ++ ++ log.info("Rebinding as DM") ++ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +-- +2.17.2 + diff --git a/SOURCES/0004-Ticket-49893-disable-nunc-stans-by-default.patch b/SOURCES/0004-Ticket-49893-disable-nunc-stans-by-default.patch deleted file mode 100644 index 4044649..0000000 --- a/SOURCES/0004-Ticket-49893-disable-nunc-stans-by-default.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 83949e7e4f3370f48ea5c5fabdb9af04e3d11c75 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 8 Aug 2018 17:19:27 -0400 -Subject: [PATCH] Ticket 49893 - disable nunc-stans by default - -Description: Until nunc-stans is stablized we need to disable it - -https://pagure.io/389-ds-base/issue/49893 - -Reviewed by: ? 
- -(cherry picked from commit 2f2d3b1d7e7d847de1bb9ddf2f63e71dbc90f710) ---- - ldap/servers/slapd/libglobs.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c -index 12f6ec396..56b67b79b 100644 ---- a/ldap/servers/slapd/libglobs.c -+++ b/ldap/servers/slapd/libglobs.c -@@ -1683,7 +1683,7 @@ FrontendConfig_init(void) - cfg->maxbersize = SLAPD_DEFAULT_MAXBERSIZE; - cfg->logging_backend = slapi_ch_strdup(SLAPD_INIT_LOGGING_BACKEND_INTERNAL); - cfg->rootdn = slapi_ch_strdup(SLAPD_DEFAULT_DIRECTORY_MANAGER); -- init_enable_nunc_stans = cfg->enable_nunc_stans = LDAP_ON; -+ init_enable_nunc_stans = cfg->enable_nunc_stans = LDAP_OFF; - #if defined(LINUX) - init_malloc_mxfast = cfg->malloc_mxfast = DEFAULT_MALLOC_UNSET; - init_malloc_trim_threshold = cfg->malloc_trim_threshold = DEFAULT_MALLOC_UNSET; --- -2.17.1 - diff --git a/SOURCES/0005-Ticket-49890-ldapsearch-with-server-side-sort-crashe.patch b/SOURCES/0005-Ticket-49890-ldapsearch-with-server-side-sort-crashe.patch deleted file mode 100644 index 6a1d09a..0000000 --- a/SOURCES/0005-Ticket-49890-ldapsearch-with-server-side-sort-crashe.patch +++ /dev/null @@ -1,137 +0,0 @@ -From a21ba4722268349b9c63000145e5d119e1fddd60 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 9 Aug 2018 15:27:59 -0400 -Subject: [PATCH] Ticket 49890 : ldapsearch with server side sort crashes the - ldap server - -Bug Description: - Server side sort with a specified matching rule trigger a crash - -Fix Description: - Check if the we are able to index the provided value. 
- If we are not then slapd_qsort returns an error (LDAP_OPERATION_ERROR) - -https://pagure.io/389-ds-base/issue/49890 - -Reviewed by: mreynolds - -Platforms tested: F27 - -Flag Day: no - -Doc impact: no - -(cherry picked from commit c989e18f7a3da060b16d39919b920b6b2a19a0ac) ---- - dirsrvtests/tests/suites/syntax/mr_test.py | 59 ++++++++++++++++++++++ - ldap/servers/slapd/back-ldbm/sort.c | 14 +++++ - 2 files changed, 73 insertions(+) - create mode 100644 dirsrvtests/tests/suites/syntax/mr_test.py - -diff --git a/dirsrvtests/tests/suites/syntax/mr_test.py b/dirsrvtests/tests/suites/syntax/mr_test.py -new file mode 100644 -index 000000000..57061222a ---- /dev/null -+++ b/dirsrvtests/tests/suites/syntax/mr_test.py -@@ -0,0 +1,59 @@ -+import logging -+import pytest -+import os -+import ldap -+from lib389.dbgen import dbgen -+from lib389._constants import * -+from lib389.topologies import topology_st as topo -+from lib389._controls import SSSRequestControl -+ -+DEBUGGING = os.getenv("DEBUGGING", default=False) -+if DEBUGGING: -+ logging.getLogger(__name__).setLevel(logging.DEBUG) -+else: -+ logging.getLogger(__name__).setLevel(logging.INFO) -+log = logging.getLogger(__name__) -+ -+ -+def test_sss_mr(topo): -+ """Test matching rule/server side sort does not crash DS -+ -+ :id: 48c73d76-1694-420f-ab55-187135f2d260 -+ :setup: Standalone Instance -+ :steps: -+ 1. Add sample entries to the database -+ 2. Perform search using server side control (uid:2.5.13.3) -+ :expectedresults: -+ 1. Success -+ 2. 
Success -+ """ -+ -+ log.info("Creating LDIF...") -+ ldif_dir = topo.standalone.get_ldif_dir() -+ ldif_file = os.path.join(ldif_dir, 'mr-crash.ldif') -+ dbgen(topo.standalone, 5, ldif_file, DEFAULT_SUFFIX) -+ -+ log.info("Importing LDIF...") -+ topo.standalone.stop() -+ assert topo.standalone.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) -+ topo.standalone.start() -+ -+ log.info('Search using server side sorting using undefined mr in the attr...') -+ sort_ctrl = SSSRequestControl(True, ['uid:2.5.13.3']) -+ controls = [sort_ctrl] -+ msg_id = topo.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, -+ "objectclass=*", serverctrls=controls) -+ try: -+ rtype, rdata, rmsgid, response_ctrl = topo.standalone.result3(msg_id) -+ except ldap.OPERATIONS_ERROR: -+ pass -+ -+ log.info("Test PASSED") -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -+ -diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c -index 5b84d87f3..70ac60803 100644 ---- a/ldap/servers/slapd/back-ldbm/sort.c -+++ b/ldap/servers/slapd/back-ldbm/sort.c -@@ -546,6 +546,16 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e - /* Now copy it, so the second call doesn't crap on it */ - value_a = slapi_ch_bvecdup(temp_value); /* Really, we'd prefer to not call the chXXX variant...*/ - matchrule_values_to_keys(this_one->mr_pb, actual_value_b, &value_b); -+ -+ if ((actual_value_a && !value_a) || -+ (actual_value_b && !value_b)) { -+ ber_bvecfree(actual_value_a); -+ ber_bvecfree(actual_value_b); -+ CACHE_RETURN(&inst->inst_cache, &a); -+ CACHE_RETURN(&inst->inst_cache, &b); -+ *error = 1; -+ return 0; -+ } - if (actual_value_a) - ber_bvecfree(actual_value_a); - if (actual_value_b) -@@ -717,6 +727,8 @@ recurse: - A[i] >= A[lo] for higuy <= i <= hi */ - - do { -+ if (error) -+ return LDAP_OPERATIONS_ERROR; - loguy++; - } while (loguy 
<= hi && compare_entries_sv(loguy, lo, s, bc, &error) <= 0); - -@@ -724,6 +736,8 @@ recurse: - either loguy > hi or A[loguy] > A[lo] */ - - do { -+ if (error) -+ return LDAP_OPERATIONS_ERROR; - higuy--; - } while (higuy > lo && compare_entries_sv(higuy, lo, s, bc, &error) >= 0); - --- -2.17.1 - diff --git a/SOURCES/0005-Ticket-50091-shadowWarning-is-not-generated-if-passw.patch b/SOURCES/0005-Ticket-50091-shadowWarning-is-not-generated-if-passw.patch new file mode 100644 index 0000000..dbe4e5a --- /dev/null +++ b/SOURCES/0005-Ticket-50091-shadowWarning-is-not-generated-if-passw.patch @@ -0,0 +1,42 @@ +From 576774dc7cc0f7af7505f0f1a19b52e3c84c6625 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Fri, 14 Dec 2018 17:42:22 +0100 +Subject: [PATCH] Ticket 50091 - shadowWarning is not generated if + passwordWarning is lower than 86400 seconds (1 day) + +Bug Description: + For a shadowAccount, if a password policy defines passwordWarning below 1 days (86400 seconds) + then the shadowWarning (in day) is not returned from the entry. In such case its value is '0'. 
+ +Fix Description: + The fix is to accept shadowWarning = 0 as valid value and return it + +https://pagure.io/389-ds-base/issue/50091 + +Reviewed by: Mark Reynolds + +Platforms tested: F27 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/slapd/pw.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c +index ec23ed1a5..11149f51a 100644 +--- a/ldap/servers/slapd/pw.c ++++ b/ldap/servers/slapd/pw.c +@@ -2844,7 +2844,7 @@ add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e) + shadowval = _MAX_SHADOW; + } + } +- if (shadowval > 0) { ++ if (shadowval >= 0) { + shwarn = slapi_entry_attr_get_charptr(*e, "shadowWarning"); + if (shwarn) { + sval = strtoll(shwarn, NULL, 0); +-- +2.17.2 + diff --git a/SOURCES/0006-Bug-1614820-Crash-in-vslapd_log_emergency_error.patch b/SOURCES/0006-Bug-1614820-Crash-in-vslapd_log_emergency_error.patch deleted file mode 100644 index b49777d..0000000 --- a/SOURCES/0006-Bug-1614820-Crash-in-vslapd_log_emergency_error.patch +++ /dev/null @@ -1,85 +0,0 @@ -From 59071a77774c530f0ab570dda27e23a021d23972 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 23 Aug 2018 10:09:58 -0400 -Subject: [PATCH] Bug 1614820 - Crash in vslapd_log_emergency_error - -Description: We were not locking the error log fd before closing and reopening - the log file. This could cause a crash when multiple threads are - trying to log tothe errors log. 
---- - ldap/servers/slapd/log.c | 22 ++++++++++++++++------ - 1 file changed, 16 insertions(+), 6 deletions(-) - -diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c -index 2e4ee03a8..7dd71541b 100644 ---- a/ldap/servers/slapd/log.c -+++ b/ldap/servers/slapd/log.c -@@ -2231,11 +2231,11 @@ vslapd_log_emergency_error(LOGFD fp, const char *msg, int locked) - if (logging_hr_timestamps_enabled == 1) { - struct timespec tsnow; - if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) { -- syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to determine system time for message :: %s", msg); -+ syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to determine system time for message :: %s\n", msg); - return; - } - if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(tbuf), tbuf, &size) != 0) { -- syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to format system time for message :: %s", msg); -+ syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to format system time for message :: %s\n", msg); - return; - } - } else { -@@ -2243,14 +2243,14 @@ vslapd_log_emergency_error(LOGFD fp, const char *msg, int locked) - time_t tnl; - tnl = slapi_current_utc_time(); - if (format_localTime_log(tnl, sizeof(tbuf), tbuf, &size) != 0) { -- syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to format system time for message :: %s", msg); -+ syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to format system time for message :: %s\n", msg); - return; - } - #ifdef HAVE_CLOCK_GETTIME - } - #endif - -- PR_snprintf(buffer, sizeof(buffer), "%s - EMERG - %s", tbuf, msg); -+ PR_snprintf(buffer, sizeof(buffer), "%s - EMERG - %s\n", tbuf, msg); - size = strlen(buffer); - - if (!locked) { -@@ -2531,7 +2531,7 @@ vslapd_log_access(char *fmt, va_list ap) - - if (SLAPI_LOG_BUFSIZ - blen < vlen) { - /* We won't be able to fit the message in! Uh-oh! */ -- /* Should we actually just do the snprintf, and warn that message was trunced? 
*/ -+ /* Should we actually just do the snprintf, and warn that message was truncated? */ - log__error_emergency("Insufficent buffer capacity to fit timestamp and message!", 1, 0); - return -1; - } -@@ -4486,6 +4486,13 @@ log__error_emergency(const char *errstr, int reopen, int locked) - if (!reopen) { - return; - } -+ if (!locked) { -+ /* -+ * Take the lock because we are closing and reopening the error log (fd), -+ * and we don't want any other threads trying to use this fd -+ */ -+ LOG_ERROR_LOCK_WRITE(); -+ } - if (NULL != loginfo.log_error_fdes) { - LOG_CLOSE(loginfo.log_error_fdes); - } -@@ -4494,7 +4501,10 @@ log__error_emergency(const char *errstr, int reopen, int locked) - PRErrorCode prerr = PR_GetError(); - syslog(LOG_ERR, "Failed to reopen errors log file, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", prerr, slapd_pr_strerror(prerr)); - } else { -- vslapd_log_emergency_error(loginfo.log_error_fdes, errstr, locked); -+ vslapd_log_emergency_error(loginfo.log_error_fdes, errstr, 1 /* locked */); -+ } -+ if (!locked) { -+ LOG_ERROR_UNLOCK_WRITE(); - } - return; - } --- -2.17.1 - diff --git a/SOURCES/0006-Ticket-50260-backend-txn-plugins-can-corrupt-entry-c.patch b/SOURCES/0006-Ticket-50260-backend-txn-plugins-can-corrupt-entry-c.patch new file mode 100644 index 0000000..c460987 --- /dev/null +++ b/SOURCES/0006-Ticket-50260-backend-txn-plugins-can-corrupt-entry-c.patch @@ -0,0 +1,700 @@ +From 669d0b288ca55a144fd1f5ba30199d2d2bb82061 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 7 Mar 2019 15:38:25 -0500 +Subject: [PATCH] Ticket 50260 - backend txn plugins can corrupt entry cache + +Bug Description: If a nested backend txn plugin fails, any updates + it made that went into the entry cache still persist + after the database transaction is aborted. 
+ +Fix Description: In order to be sure the entry cache is not corrupted + after a backend txn plugin failure we need to flush + all the cache entries that were added to the cache + after the parent operation was started. + + To do this we record the start time the original operation, + (or parent operation), and we record the time any entry + is added to the cache. Then on failure we do a comparision + and remove the entry from the cache if it's not in use. + If it is in use we add a "invalid" flag which triggers + the entry to be removed when the cache entry is returned + by the owner. + +https://pagure.io/389-ds-base/issue/50260 + +CI tested and ASAN approved. + +Reviewed by: firstyear, tbordaz, and lkrispen (Thanks!!!) + +(cherry picked from commit 7ba8a80cfbaed9f6d727f98ed8c284943b3295e1) +--- + dirsrvtests/tests/suites/betxns/betxn_test.py | 114 ++++++++++++++++-- + ldap/servers/slapd/back-ldbm/back-ldbm.h | 68 ++++++----- + ldap/servers/slapd/back-ldbm/backentry.c | 3 +- + ldap/servers/slapd/back-ldbm/cache.c | 112 ++++++++++++++++- + ldap/servers/slapd/back-ldbm/ldbm_add.c | 14 +++ + ldap/servers/slapd/back-ldbm/ldbm_delete.c | 14 +++ + ldap/servers/slapd/back-ldbm/ldbm_modify.c | 14 +++ + ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 32 +++-- + .../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 + + ldap/servers/slapd/slapi-plugin.h | 6 + + 10 files changed, 321 insertions(+), 57 deletions(-) + +diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py +index 48181a9ea..f03fb93cc 100644 +--- a/dirsrvtests/tests/suites/betxns/betxn_test.py ++++ b/dirsrvtests/tests/suites/betxns/betxn_test.py +@@ -7,12 +7,10 @@ + # --- END COPYRIGHT BLOCK --- + # + import pytest +-import six + import ldap + from lib389.tasks import * + from lib389.utils import * + from lib389.topologies import topology_st +- + from lib389._constants import DEFAULT_SUFFIX, PLUGIN_7_BIT_CHECK, PLUGIN_ATTR_UNIQUENESS, PLUGIN_MEMBER_OF + + 
logging.getLogger(__name__).setLevel(logging.DEBUG) +@@ -249,8 +247,8 @@ def test_betxn_memberof(topology_st, dynamic_plugins): + log.info('test_betxn_memberof: PASSED') + + +-def test_betxn_modrdn_memberof(topology_st): +- """Test modrdn operartions and memberOf ++def test_betxn_modrdn_memberof_cache_corruption(topology_st): ++ """Test modrdn operations and memberOf + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 + +@@ -285,18 +283,18 @@ def test_betxn_modrdn_memberof(topology_st): + + # Create user and add it to group + users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) +- user = users.create(properties=TEST_USER_PROPERTIES) ++ user = users.ensure_state(properties=TEST_USER_PROPERTIES) + if not ds_is_older('1.3.7'): + user.remove('objectClass', 'nsMemberOf') + + group.add_member(user.dn) + + # Attempt modrdn that should fail, but the original entry should stay in the cache +- with pytest.raises(ldap.OBJECTCLASS_VIOLATION): ++ with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group.rename('cn=group_to_people', newsuperior=peoplebase) + + # Should fail, but not with NO_SUCH_OBJECT as the original entry should still be in the cache +- with pytest.raises(ldap.OBJECTCLASS_VIOLATION): ++ with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group.rename('cn=group_to_people', newsuperior=peoplebase) + + # +@@ -305,6 +303,108 @@ def test_betxn_modrdn_memberof(topology_st): + log.info('test_betxn_modrdn_memberof: PASSED') + + ++def test_ri_and_mep_cache_corruption(topology_st): ++ """Test RI plugin aborts change after MEP plugin fails. ++ This is really testing the entry cache for corruption ++ ++ :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995 ++ ++ :setup: Standalone instance ++ ++ :steps: 1. Enable and configure mep and ri plugins ++ 2. Add user and add it to a group ++ 3. Disable MEP plugin and remove MEP group ++ 4. Delete user ++ 5. Check that user is still a member of the group ++ ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. 
It fails with NO_SUCH_OBJECT ++ 5. Success ++ ++ """ ++ # Start plugins ++ topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') ++ mep_plugin = ManagedEntriesPlugin(topology_st.standalone) ++ mep_plugin.enable() ++ ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone) ++ ri_plugin.enable() ++ ++ # Add our org units ++ ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ++ ou_people = ous.create(properties={'ou': 'managed_people'}) ++ ou_groups = ous.create(properties={'ou': 'managed_groups'}) ++ ++ # Configure MEP ++ mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX) ++ mep_template1 = mep_templates.create(properties={ ++ 'cn': 'MEP template', ++ 'mepRDNAttr': 'cn', ++ 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), ++ 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') ++ }) ++ mep_configs = MEPConfigs(topology_st.standalone) ++ mep_configs.create(properties={'cn': 'config', ++ 'originScope': ou_people.dn, ++ 'originFilter': 'objectclass=posixAccount', ++ 'managedBase': ou_groups.dn, ++ 'managedTemplate': mep_template1.dn}) ++ ++ # Add an entry that meets the MEP scope ++ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, ++ rdn='ou={}'.format(ou_people.rdn)) ++ user = users.create(properties={ ++ 'uid': 'test-user1', ++ 'cn': 'test-user', ++ 'sn': 'test-user', ++ 'uidNumber': '10011', ++ 'gidNumber': '20011', ++ 'homeDirectory': '/home/test-user1' ++ }) ++ ++ # Add group ++ groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) ++ user_group = groups.ensure_state(properties={'cn': 'group', 'member': user.dn}) ++ ++ # Check if a managed group entry was created ++ mep_group = Group(topology_st.standalone, dn='cn={},{}'.format(user.rdn, ou_groups.dn)) ++ if not mep_group.exists(): ++ log.fatal("MEP group was not created for the user") ++ assert False ++ ++ # Mess with MEP so it fails ++ mep_plugin.disable() ++ mep_group.delete() ++ 
mep_plugin.enable() ++ ++ # Add another group for verify entry cache is not corrupted ++ test_group = groups.create(properties={'cn': 'test_group'}) ++ ++ # Delete user, should fail, and user should still be a member ++ with pytest.raises(ldap.NO_SUCH_OBJECT): ++ user.delete() ++ ++ # Verify membership is intact ++ if not user_group.is_member(user.dn): ++ log.fatal("Member was incorrectly removed from the group!! Or so it seems") ++ ++ # Restart server and test again in case this was a cache issue ++ topology_st.standalone.restart() ++ if user_group.is_member(user.dn): ++ log.info("The entry cache was corrupted") ++ assert False ++ ++ assert False ++ ++ # Verify test group is still found in entry cache by deleting it ++ test_group.delete() ++ ++ # Success ++ log.info("Test PASSED") ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h +index 4727961a9..6cac605c0 100644 +--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h +@@ -312,48 +312,52 @@ typedef struct + + struct backcommon + { +- int ep_type; /* to distinguish backdn from backentry */ +- struct backcommon *ep_lrunext; /* for the cache */ +- struct backcommon *ep_lruprev; /* for the cache */ +- ID ep_id; /* entry id */ +- char ep_state; /* state in the cache */ +-#define ENTRY_STATE_DELETED 0x1 /* entry is marked as deleted */ +-#define ENTRY_STATE_CREATING 0x2 /* entry is being created; don't touch it */ +-#define ENTRY_STATE_NOTINCACHE 0x4 /* cache_add failed; not in the cache */ +- int ep_refcnt; /* entry reference cnt */ +- size_t ep_size; /* for cache tracking */ ++ int32_t ep_type; /* to distinguish backdn from backentry */ ++ struct backcommon *ep_lrunext; /* for the cache */ ++ struct backcommon *ep_lruprev; /* for the cache */ ++ ID ep_id; /* entry id */ ++ uint8_t ep_state; /* state in the cache */ ++#define ENTRY_STATE_DELETED 0x1 /* entry 
is marked as deleted */ ++#define ENTRY_STATE_CREATING 0x2 /* entry is being created; don't touch it */ ++#define ENTRY_STATE_NOTINCACHE 0x4 /* cache_add failed; not in the cache */ ++#define ENTRY_STATE_INVALID 0x8 /* cache entry is invalid and needs to be removed */ ++ int32_t ep_refcnt; /* entry reference cnt */ ++ size_t ep_size; /* for cache tracking */ ++ struct timespec ep_create_time; /* the time the entry was added to the cache */ + }; + +-/* From ep_type through ep_size MUST be identical to backcommon */ ++/* From ep_type through ep_create_time MUST be identical to backcommon */ + struct backentry + { +- int ep_type; /* to distinguish backdn from backentry */ +- struct backcommon *ep_lrunext; /* for the cache */ +- struct backcommon *ep_lruprev; /* for the cache */ +- ID ep_id; /* entry id */ +- char ep_state; /* state in the cache */ +- int ep_refcnt; /* entry reference cnt */ +- size_t ep_size; /* for cache tracking */ +- Slapi_Entry *ep_entry; /* real entry */ ++ int32_t ep_type; /* to distinguish backdn from backentry */ ++ struct backcommon *ep_lrunext; /* for the cache */ ++ struct backcommon *ep_lruprev; /* for the cache */ ++ ID ep_id; /* entry id */ ++ uint8_t ep_state; /* state in the cache */ ++ int32_t ep_refcnt; /* entry reference cnt */ ++ size_t ep_size; /* for cache tracking */ ++ struct timespec ep_create_time; /* the time the entry was added to the cache */ ++ Slapi_Entry *ep_entry; /* real entry */ + Slapi_Entry *ep_vlventry; +- void *ep_dn_link; /* linkage for the 3 hash */ +- void *ep_id_link; /* tables used for */ +- void *ep_uuid_link; /* looking up entries */ +- PRMonitor *ep_mutexp; /* protection for mods; make it reentrant */ ++ void *ep_dn_link; /* linkage for the 3 hash */ ++ void *ep_id_link; /* tables used for */ ++ void *ep_uuid_link; /* looking up entries */ ++ PRMonitor *ep_mutexp; /* protection for mods; make it reentrant */ + }; + +-/* From ep_type through ep_size MUST be identical to backcommon */ ++/* From ep_type 
through ep_create_time MUST be identical to backcommon */ + struct backdn + { +- int ep_type; /* to distinguish backdn from backentry */ +- struct backcommon *ep_lrunext; /* for the cache */ +- struct backcommon *ep_lruprev; /* for the cache */ +- ID ep_id; /* entry id */ +- char ep_state; /* state in the cache; share ENTRY_STATE_* */ +- int ep_refcnt; /* entry reference cnt */ +- size_t ep_size; /* for cache tracking */ ++ int32_t ep_type; /* to distinguish backdn from backentry */ ++ struct backcommon *ep_lrunext; /* for the cache */ ++ struct backcommon *ep_lruprev; /* for the cache */ ++ ID ep_id; /* entry id */ ++ uint8_t ep_state; /* state in the cache; share ENTRY_STATE_* */ ++ int32_t ep_refcnt; /* entry reference cnt */ ++ size_t ep_size; /* for cache tracking */ ++ struct timespec ep_create_time; /* the time the entry was added to the cache */ + Slapi_DN *dn_sdn; +- void *dn_id_link; /* for hash table */ ++ void *dn_id_link; /* for hash table */ + }; + + /* for the in-core cache of entries */ +diff --git a/ldap/servers/slapd/back-ldbm/backentry.c b/ldap/servers/slapd/back-ldbm/backentry.c +index f2fe780db..972842bcb 100644 +--- a/ldap/servers/slapd/back-ldbm/backentry.c ++++ b/ldap/servers/slapd/back-ldbm/backentry.c +@@ -23,7 +23,8 @@ backentry_free(struct backentry **bep) + return; + } + ep = *bep; +- PR_ASSERT(ep->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)); ++ ++ PR_ASSERT(ep->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE | ENTRY_STATE_INVALID)); + if (ep->ep_entry != NULL) { + slapi_entry_free(ep->ep_entry); + } +diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c +index 86e1f7b39..458d7912f 100644 +--- a/ldap/servers/slapd/back-ldbm/cache.c ++++ b/ldap/servers/slapd/back-ldbm/cache.c +@@ -56,11 +56,14 @@ + #define LOG(...) 
+ #endif + +-#define LRU_DETACH(cache, e) lru_detach((cache), (void *)(e)) ++typedef enum { ++ ENTRY_CACHE, ++ DN_CACHE, ++} CacheType; + ++#define LRU_DETACH(cache, e) lru_detach((cache), (void *)(e)) + #define CACHE_LRU_HEAD(cache, type) ((type)((cache)->c_lruhead)) + #define CACHE_LRU_TAIL(cache, type) ((type)((cache)->c_lrutail)) +- + #define BACK_LRU_NEXT(entry, type) ((type)((entry)->ep_lrunext)) + #define BACK_LRU_PREV(entry, type) ((type)((entry)->ep_lruprev)) + +@@ -185,6 +188,7 @@ new_hash(u_long size, u_long offset, HashFn hfn, HashTestFn tfn) + int + add_hash(Hashtable *ht, void *key, uint32_t keylen, void *entry, void **alt) + { ++ struct backcommon *back_entry = (struct backcommon *)entry; + u_long val, slot; + void *e; + +@@ -202,6 +206,7 @@ add_hash(Hashtable *ht, void *key, uint32_t keylen, void *entry, void **alt) + e = HASH_NEXT(ht, e); + } + /* ok, it's not already there, so add it */ ++ back_entry->ep_create_time = slapi_current_rel_time_hr(); + HASH_NEXT(ht, entry) = ht->slot[slot]; + ht->slot[slot] = entry; + return 1; +@@ -492,6 +497,89 @@ cache_make_hashes(struct cache *cache, int type) + } + } + ++/* ++ * Helper function for flush_hash() to calculate if the entry should be ++ * removed from the cache. ++ */ ++static int32_t ++flush_remove_entry(struct timespec *entry_time, struct timespec *start_time) ++{ ++ struct timespec diff; ++ ++ slapi_timespec_diff(entry_time, start_time, &diff); ++ if (diff.tv_sec >= 0) { ++ return 1; ++ } else { ++ return 0; ++ } ++} ++ ++/* ++ * Flush all the cache entries that were added after the "start time" ++ * This is called when a backend transaction plugin fails, and we need ++ * to remove all the possible invalid entries in the cache. ++ * ++ * If the ref count is 0, we can straight up remove it from the cache, but ++ * if the ref count is greater than 1, then the entry is currently in use. 
++ * In the later case we set the entry state to ENTRY_STATE_INVALID, and ++ * when the owning thread cache_returns() the cache entry is automatically ++ * removed so another thread can not use/lock the invalid cache entry. ++ */ ++static void ++flush_hash(struct cache *cache, struct timespec *start_time, int32_t type) ++{ ++ void *e, *laste = NULL; ++ Hashtable *ht = cache->c_idtable; ++ ++ cache_lock(cache); ++ ++ for (size_t i = 0; i < ht->size; i++) { ++ e = ht->slot[i]; ++ while (e) { ++ struct backcommon *entry = (struct backcommon *)e; ++ uint64_t remove_it = 0; ++ if (flush_remove_entry(&entry->ep_create_time, start_time)) { ++ /* Mark the entry to be removed */ ++ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", "[%s] Removing entry id (%d)\n", ++ type ? "DN CACHE" : "ENTRY CACHE", entry->ep_id); ++ remove_it = 1; ++ } ++ laste = e; ++ e = HASH_NEXT(ht, e); ++ ++ if (remove_it) { ++ /* since we have the cache lock we know we can trust refcnt */ ++ entry->ep_state |= ENTRY_STATE_INVALID; ++ if (entry->ep_refcnt == 0) { ++ entry->ep_refcnt++; ++ lru_delete(cache, laste); ++ if (type == ENTRY_CACHE) { ++ entrycache_remove_int(cache, laste); ++ entrycache_return(cache, (struct backentry **)&laste); ++ } else { ++ dncache_remove_int(cache, laste); ++ dncache_return(cache, (struct backdn **)&laste); ++ } ++ } else { ++ /* Entry flagged for removal */ ++ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", ++ "[%s] Flagging entry to be removed later: id (%d) refcnt: %d\n", ++ type ? 
"DN CACHE" : "ENTRY CACHE", entry->ep_id, entry->ep_refcnt); ++ } ++ } ++ } ++ } ++ ++ cache_unlock(cache); ++} ++ ++void ++revert_cache(ldbm_instance *inst, struct timespec *start_time) ++{ ++ flush_hash(&inst->inst_cache, start_time, ENTRY_CACHE); ++ flush_hash(&inst->inst_dncache, start_time, DN_CACHE); ++} ++ + /* initialize the cache */ + int + cache_init(struct cache *cache, uint64_t maxsize, long maxentries, int type) +@@ -1142,7 +1230,7 @@ entrycache_return(struct cache *cache, struct backentry **bep) + } else { + ASSERT(e->ep_refcnt > 0); + if (!--e->ep_refcnt) { +- if (e->ep_state & ENTRY_STATE_DELETED) { ++ if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_INVALID)) { + const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(e)); + if (ndn) { + /* +@@ -1154,6 +1242,13 @@ entrycache_return(struct cache *cache, struct backentry **bep) + LOG("entrycache_return -Failed to remove %s from dn table\n", ndn); + } + } ++ if (e->ep_state & ENTRY_STATE_INVALID) { ++ /* Remove it from the hash table before we free the back entry */ ++ slapi_log_err(SLAPI_LOG_CACHE, "entrycache_return", ++ "Finally flushing invalid entry: %d (%s)\n", ++ e->ep_id, backentry_get_ndn(e)); ++ entrycache_remove_int(cache, e); ++ } + backentry_free(bep); + } else { + lru_add(cache, e); +@@ -1535,7 +1630,7 @@ cache_lock_entry(struct cache *cache, struct backentry *e) + + /* make sure entry hasn't been deleted now */ + cache_lock(cache); +- if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)) { ++ if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE | ENTRY_STATE_INVALID)) { + cache_unlock(cache); + PR_ExitMonitor(e->ep_mutexp); + LOG("<= cache_lock_entry (DELETED)\n"); +@@ -1696,7 +1791,14 @@ dncache_return(struct cache *cache, struct backdn **bdn) + } else { + ASSERT((*bdn)->ep_refcnt > 0); + if (!--(*bdn)->ep_refcnt) { +- if ((*bdn)->ep_state & ENTRY_STATE_DELETED) { ++ if ((*bdn)->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_INVALID)) { ++ if ((*bdn)->ep_state & 
ENTRY_STATE_INVALID) { ++ /* Remove it from the hash table before we free the back dn */ ++ slapi_log_err(SLAPI_LOG_CACHE, "dncache_return", ++ "Finally flushing invalid entry: %d (%s)\n", ++ (*bdn)->ep_id, slapi_sdn_get_dn((*bdn)->dn_sdn)); ++ dncache_remove_int(cache, (*bdn)); ++ } + backdn_free(bdn); + } else { + lru_add(cache, (void *)*bdn); +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c +index 32c8e71ff..aa5b59aea 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c +@@ -97,6 +97,8 @@ ldbm_back_add(Slapi_PBlock *pb) + PRUint64 conn_id; + int op_id; + int result_sent = 0; ++ int32_t parent_op = 0; ++ struct timespec parent_time; + + if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { + conn_id = 0; /* connection is NULL */ +@@ -147,6 +149,13 @@ ldbm_back_add(Slapi_PBlock *pb) + slapi_entry_delete_values(e, numsubordinates, NULL); + + dblayer_txn_init(li, &txn); ++ ++ if (txn.back_txn_txn == NULL) { ++ /* This is the parent operation, get the time */ ++ parent_op = 1; ++ parent_time = slapi_current_rel_time_hr(); ++ } ++ + /* the calls to perform searches require the parent txn if any + so set txn to the parent_txn until we begin the child transaction */ + if (parent_txn) { +@@ -1212,6 +1221,11 @@ ldbm_back_add(Slapi_PBlock *pb) + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? 
&ldap_result_code : &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); ++ ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } + goto error_return; + } + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c +index f5f6c1e3a..3f687eb91 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c +@@ -79,6 +79,8 @@ ldbm_back_delete(Slapi_PBlock *pb) + ID tomb_ep_id = 0; + int result_sent = 0; + Connection *pb_conn; ++ int32_t parent_op = 0; ++ struct timespec parent_time; + + if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { + conn_id = 0; /* connection is NULL */ +@@ -100,6 +102,13 @@ ldbm_back_delete(Slapi_PBlock *pb) + dblayer_txn_init(li, &txn); + /* the calls to perform searches require the parent txn if any + so set txn to the parent_txn until we begin the child transaction */ ++ ++ if (txn.back_txn_txn == NULL) { ++ /* This is the parent operation, get the time */ ++ parent_op = 1; ++ parent_time = slapi_current_rel_time_hr(); ++ } ++ + if (parent_txn) { + txn.back_txn_txn = parent_txn; + } else { +@@ -1270,6 +1279,11 @@ replace_entry: + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); ++ ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } + goto error_return; + } + if (parent_found) { +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c +index cc4319e5f..b90b3e0f0 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c +@@ -412,6 +412,8 @@ ldbm_back_modify(Slapi_PBlock *pb) + int fixup_tombstone = 0; + int ec_locked = 0; + int result_sent = 0; ++ int32_t parent_op = 0; ++ struct timespec parent_time; + + 
slapi_pblock_get(pb, SLAPI_BACKEND, &be); + slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); +@@ -426,6 +428,13 @@ ldbm_back_modify(Slapi_PBlock *pb) + dblayer_txn_init(li, &txn); /* must do this before first goto error_return */ + /* the calls to perform searches require the parent txn if any + so set txn to the parent_txn until we begin the child transaction */ ++ ++ if (txn.back_txn_txn == NULL) { ++ /* This is the parent operation, get the time */ ++ parent_op = 1; ++ parent_time = slapi_current_rel_time_hr(); ++ } ++ + if (parent_txn) { + txn.back_txn_txn = parent_txn; + } else { +@@ -864,6 +873,11 @@ ldbm_back_modify(Slapi_PBlock *pb) + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); ++ ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } + goto error_return; + } + retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN); +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +index e4d0337d4..73e50ebcc 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +@@ -97,6 +97,8 @@ ldbm_back_modrdn(Slapi_PBlock *pb) + int op_id; + int result_sent = 0; + Connection *pb_conn = NULL; ++ int32_t parent_op = 0; ++ struct timespec parent_time; + + if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { + conn_id = 0; /* connection is NULL */ +@@ -134,6 +136,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb) + + /* dblayer_txn_init needs to be called before "goto error_return" */ + dblayer_txn_init(li, &txn); ++ ++ if (txn.back_txn_txn == NULL) { ++ /* This is the parent operation, get the time */ ++ parent_op = 1; ++ parent_time = slapi_current_rel_time_hr(); ++ } ++ + /* the calls to perform searches require the parent txn if any + so set txn to the parent_txn until we 
begin the child transaction */ + if (parent_txn) { +@@ -1208,6 +1217,11 @@ ldbm_back_modrdn(Slapi_PBlock *pb) + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); ++ ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } + goto error_return; + } + retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); +@@ -1353,8 +1367,13 @@ error_return: + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); ++ ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } + } +- retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); ++ retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); + + /* Release SERIAL LOCK */ + dblayer_txn_abort(be, &txn); /* abort crashes in case disk full */ +@@ -1411,17 +1430,6 @@ common_return: + "operation failed, the target entry is cleared from dncache (%s)\n", slapi_entry_get_dn(ec->ep_entry)); + CACHE_REMOVE(&inst->inst_dncache, bdn); + CACHE_RETURN(&inst->inst_dncache, &bdn); +- /* +- * If the new/invalid entry (ec) is in the cache, that means we need to +- * swap it out with the original entry (e) --> to undo the swap that +- * modrdn_rename_entry_update_indexes() did. 
+- */ +- if (cache_is_in_cache(&inst->inst_cache, ec)) { +- if (cache_replace(&inst->inst_cache, ec, e) != 0) { +- slapi_log_err(SLAPI_LOG_ALERT, "ldbm_back_modrdn", +- "failed to replace cache entry after error\n"); +- } +- } + } + + if (ec && inst) { +diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +index b56f6ef26..e68765bd4 100644 +--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +@@ -55,6 +55,7 @@ void cache_unlock_entry(struct cache *cache, struct backentry *e); + int cache_replace(struct cache *cache, void *oldptr, void *newptr); + int cache_has_otherref(struct cache *cache, void *bep); + int cache_is_in_cache(struct cache *cache, void *ptr); ++void revert_cache(ldbm_instance *inst, struct timespec *start_time); + + #ifdef CACHE_DEBUG + void check_entry_cache(struct cache *cache, struct backentry *e); +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 54c195eef..0bc3a6fab 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -6765,6 +6765,12 @@ time_t slapi_current_time(void) __attribute__((deprecated)); + * \return timespec of the current relative system time. + */ + struct timespec slapi_current_time_hr(void); ++/** ++ * Returns the current system time as a hr clock ++ * ++ * \return timespec of the current monotonic time. ++ */ ++struct timespec slapi_current_rel_time_hr(void); + /** + * Returns the current system time as a hr clock in UTC timezone. 
+ * This clock adjusts with ntp steps, and should NOT be +-- +2.17.2 + diff --git a/SOURCES/0007-Ticket-49932-Crash-in-delete_passwdPolicy-when-persi.patch b/SOURCES/0007-Ticket-49932-Crash-in-delete_passwdPolicy-when-persi.patch deleted file mode 100644 index 68427ae..0000000 --- a/SOURCES/0007-Ticket-49932-Crash-in-delete_passwdPolicy-when-persi.patch +++ /dev/null @@ -1,39 +0,0 @@ -From de03e7456108de3f3d28c6a5d33926525b70557f Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 30 Aug 2018 14:28:10 -0400 -Subject: [PATCH] Ticket 49932 - Crash in delete_passwdPolicy when persistent - search connections are terminated unexpectedly - -Bug Description: We clone a pblock in a psearch search, and under certain - error conditions this pblock is freed, but it frees the - password policy struct which can lead to a double free - when the original pblock is destroyed. - -Fix Description: During the cloning, set the pwppolicy struct to NULL - so the clone allocates its own policy if needed - -https://pagure.io/389-ds-base/issue/49932 - -Reviewed by: ? 
- -(cherry picked from commit 78fc627accacfa4061ce48977e22301f81ea8d73) ---- - ldap/servers/slapd/pblock.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c -index 4514c3ce6..bc18a7b18 100644 ---- a/ldap/servers/slapd/pblock.c -+++ b/ldap/servers/slapd/pblock.c -@@ -322,6 +322,8 @@ slapi_pblock_clone(Slapi_PBlock *pb) - if (pb->pb_intop != NULL) { - _pblock_assert_pb_intop(new_pb); - *(new_pb->pb_intop) = *(pb->pb_intop); -+ /* set pwdpolicy to NULL so this clone allocates its own policy */ -+ new_pb->pb_intop->pwdpolicy = NULL; - } - if (pb->pb_intplugin != NULL) { - _pblock_assert_pb_intplugin(new_pb); --- -2.17.1 - diff --git a/SOURCES/0007-Ticket-50077-Do-not-automatically-turn-automember-po.patch b/SOURCES/0007-Ticket-50077-Do-not-automatically-turn-automember-po.patch new file mode 100644 index 0000000..46ad200 --- /dev/null +++ b/SOURCES/0007-Ticket-50077-Do-not-automatically-turn-automember-po.patch @@ -0,0 +1,55 @@ +From addb07130e93bf8acb32178190451ba7cc9cc888 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 12 Mar 2019 16:03:29 -0400 +Subject: [PATCH] Ticket 50077 - Do not automatically turn automember postop + modifies on + +Description: Although we have set the new postop processing on by + default in the template-dse.ldif, we do not want to + enable it by default for upgrades (only new installs). + + So if the attribute is not set, it is assumed "off". + +https://pagure.io/389-ds-base/issue/50077 + +Reviewed by: firstyear(Thanks!) 
+ +(cherry picked from commit d318d060f49b67ed1b10f22b52f98e038afa356a) +--- + ldap/servers/plugins/automember/automember.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c +index abd6df805..bb6ff1f8e 100644 +--- a/ldap/servers/plugins/automember/automember.c ++++ b/ldap/servers/plugins/automember/automember.c +@@ -90,7 +90,7 @@ static void automember_task_export_destructor(Slapi_Task *task); + static void automember_task_map_destructor(Slapi_Task *task); + + #define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR +-static uint64_t plugin_do_modify = 1; ++static uint64_t plugin_do_modify = 0; + static uint64_t plugin_is_betxn = 0; + + /* +@@ -345,15 +345,14 @@ automember_start(Slapi_PBlock *pb) + } + + /* Check and set if we should process modify operations */ +- plugin_do_modify = 1; /* default is "on" */ + if ((slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &plugin_entry) == 0) && plugin_entry){ + if ((do_modify = slapi_fetch_attr(plugin_entry, AUTOMEMBER_DO_MODIFY, NULL)) ) { + if (strcasecmp(do_modify, "on") && strcasecmp(do_modify, "off")) { + slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_start - %s: invalid value \"%s\". Valid values are \"on\" or \"off\". 
Using default of \"on\"\n", + AUTOMEMBER_DO_MODIFY, do_modify); +- } else if (strcasecmp(do_modify, "off") == 0 ){ +- plugin_do_modify = 0; ++ } else if (strcasecmp(do_modify, "on") == 0 ){ ++ plugin_do_modify = 1; + } + } + } +-- +2.17.2 + diff --git a/SOURCES/0008-Bug-1624004-potential-denial-of-service-attack.patch b/SOURCES/0008-Bug-1624004-potential-denial-of-service-attack.patch deleted file mode 100644 index aafb6eb..0000000 --- a/SOURCES/0008-Bug-1624004-potential-denial-of-service-attack.patch +++ /dev/null @@ -1,99 +0,0 @@ -From ab7848a4a30d79c7433a1689ba1ea18897b73453 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 18 Sep 2018 16:39:26 -0400 -Subject: [PATCH] Bug 1624004 - potential denial of service attack - -Bug: a search request passing 8MB of NULL bytes as search attributes will - keep one thread busy for a long time. - The reason is that the attr array is copied/normalized to the searchattrs in - the search operation and does this using charray_add() which iterates thru - the array to determine the size of the array and then allocate one element more. - so this means we iterat 8 million times an array with a then average size of - 4 million elements. - -Fix: We already have traversed the array once and know the size, so we can allocate - the needed size once and only copy the element. - In addition we check for the kind of degenerated attributes "" as used in this - test scenario. 
- So the fix will reject invalid attr liste and improve performance for valid ones - -https://bugzilla.redhat.com/show_bug.cgi?id=1624004 ---- - ldap/servers/slapd/search.c | 16 ++++++++++++++-- - ldap/servers/slapd/unbind.c | 4 ++-- - 2 files changed, 16 insertions(+), 4 deletions(-) - -diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c -index 731c6519e..dc26fc4d2 100644 ---- a/ldap/servers/slapd/search.c -+++ b/ldap/servers/slapd/search.c -@@ -209,6 +209,7 @@ do_search(Slapi_PBlock *pb) - if (attrs != NULL) { - char *normaci = slapi_attr_syntax_normalize("aci"); - int replace_aci = 0; -+ int attr_count = 0; - if (!normaci) { - normaci = slapi_ch_strdup("aci"); - } else if (strcasecmp(normaci, "aci")) { -@@ -218,9 +219,19 @@ do_search(Slapi_PBlock *pb) - /* - * . store gerattrs if any - * . add "aci" once if "*" is given -+ * . check that attrs are not degenerated - */ - for (i = 0; attrs[i] != NULL; i++) { - char *p = NULL; -+ attr_count++; -+ -+ if ( attrs[i][0] == '\0') { -+ log_search_access(pb, base, scope, fstr, "invalid attribute request"); -+ send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, NULL); -+ slapi_ch_free_string(&normaci); -+ goto free_and_return; -+ } -+ - /* check if @ is included */ - p = strchr(attrs[i], '@'); - if (p) { -@@ -244,6 +255,7 @@ do_search(Slapi_PBlock *pb) - } else if (strcmp(attrs[i], LDAP_ALL_USER_ATTRS /* '*' */) == 0) { - if (!charray_inlist(attrs, normaci)) { - charray_add(&attrs, slapi_ch_strdup(normaci)); -+ attr_count++; - } - } else if (replace_aci && (strcasecmp(attrs[i], "aci") == 0)) { - slapi_ch_free_string(&attrs[i]); -@@ -263,13 +275,13 @@ do_search(Slapi_PBlock *pb) - } - } else { - /* return the chopped type, e.g., "sn" */ -- operation->o_searchattrs = NULL; -+ operation->o_searchattrs = (char **)slapi_ch_calloc(sizeof(char *), attr_count+1); - for (i = 0; attrs[i] != NULL; i++) { - char *type; - type = slapi_attr_syntax_normalize_ext(attrs[i], - ATTR_SYNTAX_NORM_ORIG_ATTR); - /* 
attrs[i] is consumed */ -- charray_add(&operation->o_searchattrs, attrs[i]); -+ operation->o_searchattrs[i] = attrs[i]; - attrs[i] = type; - } - } -diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c -index 90f7b1546..686e27a8e 100644 ---- a/ldap/servers/slapd/unbind.c -+++ b/ldap/servers/slapd/unbind.c -@@ -87,8 +87,8 @@ do_unbind(Slapi_PBlock *pb) - /* pass the unbind to all backends */ - be_unbindall(pb_conn, operation); - -+free_and_return:; -+ - /* close the connection to the client */ - disconnect_server(pb_conn, operation->o_connid, operation->o_opid, SLAPD_DISCONNECT_UNBIND, 0); -- --free_and_return:; - } --- -2.17.1 - diff --git a/SOURCES/0008-Ticket-50282-OPERATIONS-ERROR-when-trying-to-delete-.patch b/SOURCES/0008-Ticket-50282-OPERATIONS-ERROR-when-trying-to-delete-.patch new file mode 100644 index 0000000..f75b8d7 --- /dev/null +++ b/SOURCES/0008-Ticket-50282-OPERATIONS-ERROR-when-trying-to-delete-.patch @@ -0,0 +1,69 @@ +From 57f661a8acea18aa19985d0556a78d81a9361b89 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Thu, 14 Mar 2019 17:33:35 +0100 +Subject: [PATCH 1/4] Ticket 50282 - OPERATIONS ERROR when trying to delete a + group with automember members + +Bug Description: + When automember and memberof are enabled, if a user is member of a group + because of an automember rule. Then when the group is deleted, + memberof updates the member (to update 'memberof' attribute) that + trigger automember to reevaluate the automember rule and add the member + to the group. But at this time the group is already deleted. + Chaining back the failure up to the top level operation the deletion + of the group fails + +Fix Description: + The fix consists to check that if a automember rule tries to add a user + in a group, then to check that the group exists before updating it. 
+ +https://pagure.io/389-ds-base/issue/50282 + +Reviewed by: Mark Reynolds, William Brown + +Platforms tested: F29 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/plugins/automember/automember.c | 23 ++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c +index bb6ff1f8e..fcf0cdb9a 100644 +--- a/ldap/servers/plugins/automember/automember.c ++++ b/ldap/servers/plugins/automember/automember.c +@@ -1636,6 +1636,29 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char + char *member_value = NULL; + int freeit = 0; + int rc = 0; ++ Slapi_DN *group_sdn; ++ Slapi_Entry *group_entry = NULL; ++ ++ /* First thing check that the group still exists */ ++ group_sdn = slapi_sdn_new_dn_byval(group_dn); ++ rc = slapi_search_internal_get_entry(group_sdn, NULL, &group_entry, automember_get_plugin_id()); ++ slapi_sdn_free(&group_sdn); ++ if (rc != LDAP_SUCCESS || group_entry == NULL) { ++ if (rc == LDAP_NO_SUCH_OBJECT) { ++ /* the automember group (default or target) does not exist, just skip this definition */ ++ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, ++ "automember_update_member_value - group (default or target) does not exist (%s)\n", ++ group_dn); ++ rc = 0; ++ } else { ++ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, ++ "automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n", ++ group_dn, rc); ++ } ++ slapi_entry_free(group_entry); ++ return rc; ++ } ++ slapi_entry_free(group_entry); + + /* If grouping_value is dn, we need to fetch the dn instead. 
*/ + if (slapi_attr_type_cmp(grouping_value, "dn", SLAPI_TYPE_CMP_EXACT) == 0) { +-- +2.17.2 + diff --git a/SOURCES/0009-Bug-1624004-fix-regression-in-empty-attribute-list.patch b/SOURCES/0009-Bug-1624004-fix-regression-in-empty-attribute-list.patch deleted file mode 100644 index 49fe251..0000000 --- a/SOURCES/0009-Bug-1624004-fix-regression-in-empty-attribute-list.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 55e961338810d89a6f45f31f27b3fd609535b1da Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Wed, 19 Sep 2018 09:26:59 -0400 -Subject: [PATCH] Bug 1624004 - fix regression in empty attribute list - -https://bugzilla.redhat.com/show_bug.cgi?id=1624004 ---- - ldap/servers/slapd/search.c | 12 ++++++++---- - 1 file changed, 8 insertions(+), 4 deletions(-) - -diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c -index dc26fc4d2..7e253f535 100644 ---- a/ldap/servers/slapd/search.c -+++ b/ldap/servers/slapd/search.c -@@ -210,6 +210,7 @@ do_search(Slapi_PBlock *pb) - char *normaci = slapi_attr_syntax_normalize("aci"); - int replace_aci = 0; - int attr_count = 0; -+ int empty_attrs = 0; - if (!normaci) { - normaci = slapi_ch_strdup("aci"); - } else if (strcasecmp(normaci, "aci")) { -@@ -226,10 +227,13 @@ do_search(Slapi_PBlock *pb) - attr_count++; - - if ( attrs[i][0] == '\0') { -- log_search_access(pb, base, scope, fstr, "invalid attribute request"); -- send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, NULL); -- slapi_ch_free_string(&normaci); -- goto free_and_return; -+ empty_attrs++; -+ if (empty_attrs > 1) { -+ log_search_access(pb, base, scope, fstr, "invalid attribute request"); -+ send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, NULL); -+ slapi_ch_free_string(&normaci); -+ goto free_and_return; -+ } - } - - /* check if @ is included */ --- -2.17.1 - diff --git a/SOURCES/0009-Ticket-49561-MEP-plugin-upon-direct-op-failure-will-.patch b/SOURCES/0009-Ticket-49561-MEP-plugin-upon-direct-op-failure-will-.patch new file mode 100644 
index 0000000..88ec73c --- /dev/null +++ b/SOURCES/0009-Ticket-49561-MEP-plugin-upon-direct-op-failure-will-.patch @@ -0,0 +1,64 @@ +From 99802f5290466474ca2f1fdab0bf077ec736a013 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Mon, 18 Mar 2019 13:48:03 +0100 +Subject: [PATCH 2/4] Ticket 49561 - MEP plugin, upon direct op failure, will + delete twice the same managed entry + +Bug Description: + When a failure occurs during betxn_post plugin callback, the betxn_post plugins are called again. + This is to process some kind of undo action (for example usn or dna that manage counters). + + If MEP plugin is called for a managing entry, it deletes the managed entry (that become a tombstone). + If later an other betxn_postop fails, then MEP is called again. + But as it does not detect the operation failure (for DEL and ADD), then it tries again + to delete the managed entry that is already a tombstone. + +Fix Description: + The MEP betxn_post plugin callbacks (ADD and DEL) should catch the operation failure + and return. + It is already in place for MODRDN and MOD. + +https://pagure.io/389-ds-base/issue/49561 + +Reviewed by: Mark Reynold, thanks !! + +Platforms tested: F28 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/plugins/mep/mep.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c +index 7f30f412d..a7b60e129 100644 +--- a/ldap/servers/plugins/mep/mep.c ++++ b/ldap/servers/plugins/mep/mep.c +@@ -2471,6 +2471,11 @@ mep_add_post_op(Slapi_PBlock *pb) + slapi_log_err(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM, + "--> mep_add_post_op\n"); + ++ /* Just bail if we aren't ready to service requests yet. */ ++ if (!mep_oktodo(pb)) { ++ return SLAPI_PLUGIN_SUCCESS; ++ } ++ + /* Reload config if a config entry was added. 
*/ + if ((sdn = mep_get_sdn(pb))) { + if (mep_dn_is_config(sdn)) { +@@ -2543,6 +2548,11 @@ mep_del_post_op(Slapi_PBlock *pb) + slapi_log_err(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM, + "--> mep_del_post_op\n"); + ++ /* Just bail if we aren't ready to service requests yet. */ ++ if (!mep_oktodo(pb)) { ++ return SLAPI_PLUGIN_SUCCESS; ++ } ++ + /* Reload config if a config entry was deleted. */ + if ((sdn = mep_get_sdn(pb))) { + if (mep_dn_is_config(sdn)) +-- +2.17.2 + diff --git a/SOURCES/0010-Ticket-49968-Confusing-CRITICAL-message-list_candida.patch b/SOURCES/0010-Ticket-49968-Confusing-CRITICAL-message-list_candida.patch deleted file mode 100644 index 1ad23eb..0000000 --- a/SOURCES/0010-Ticket-49968-Confusing-CRITICAL-message-list_candida.patch +++ /dev/null @@ -1,266 +0,0 @@ -From 3796e26e93991ded631ac57053049e9aad44c53b Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Wed, 10 Oct 2018 15:35:12 +0200 -Subject: [PATCH] Ticket 49968 - Confusing CRITICAL message: list_candidates - - NULL idl was recieved from filter_candidates_ext - -Bug Description: - When a filter component is indexed but returns an empty IDL - an alarming message is logged although it is normal. - -Fix Description: - Remove the alarming message - -https://pagure.io/389-ds-base/issue/49968 - -Reviewed by: Mark Reynolds - -Platforms tested: F27 + testcase - -Flag Day: no - -Doc impact: no ---- - dirsrvtests/tests/suites/basic/basic_test.py | 202 +++++++++++++++++++ - ldap/servers/slapd/back-ldbm/filterindex.c | 10 +- - 2 files changed, 204 insertions(+), 8 deletions(-) - -diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py -index 45988dc7a..dc366cd67 100644 ---- a/dirsrvtests/tests/suites/basic/basic_test.py -+++ b/dirsrvtests/tests/suites/basic/basic_test.py -@@ -868,6 +868,208 @@ adds nsslapd-return-default-opattr attr with value of one operation attribute. 
- log.fatal('Search failed, error: ' + e.message['desc']) - assert False - -+ -+@pytest.fixture(scope="module") -+def create_users(topology_st): -+ """Add users to the default suffix -+ """ -+ -+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) -+ user_names = ["Directory", "Server", "389", "lib389", "pytest"] -+ -+ log.info('Adding 5 test users') -+ for name in user_names: -+ user = users.create(properties={ -+ 'uid': name, -+ 'sn': name, -+ 'cn': name, -+ 'uidNumber': '1000', -+ 'gidNumber': '1000', -+ 'homeDirectory': '/home/%s' % name, -+ 'mail': '%s@example.com' % name, -+ 'userpassword': 'pass%s' % name, -+ }) -+ -+ -+def test_basic_anonymous_search(topology_st, create_users): -+ """Tests basic anonymous search operations -+ -+ :id: c7831e04-f458-4e50-83c7-b6f77109f639 -+ :setup: Standalone instance -+ Add 5 test users with different user names -+ :steps: -+ 1. Execute anonymous search with different filters -+ :expectedresults: -+ 1. Search should be successful -+ """ -+ -+ filters = ["uid=Directory", "(|(uid=S*)(uid=3*))", "(&(uid=l*)(mail=l*))", "(&(!(uid=D*))(ou=People))"] -+ log.info("Execute anonymous search with different filters") -+ for filtr in filters: -+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filtr) -+ assert len(entries) != 0 -+ -+ -+@pytest.mark.ds604 -+@pytest.mark.bz915801 -+def test_search_original_type(topology_st, create_users): -+ """Test ldapsearch returning original attributes -+ using nsslapd-search-return-original-type-switch -+ -+ :id: d7831d04-f558-4e50-93c7-b6f77109f640 -+ :setup: Standalone instance -+ Add some test entries -+ :steps: -+ 1. Set nsslapd-search-return-original-type-switch to ON -+ 2. Check that ldapsearch *does* return unknown attributes -+ 3. Turn off nsslapd-search-return-original-type-switch -+ 4. Check that ldapsearch doesn't return any unknown attributes -+ :expectedresults: -+ 1. nsslapd-search-return-original-type-switch should be set to ON -+ 2. 
ldapsearch should return unknown attributes -+ 3. nsslapd-search-return-original-type-switch should be OFF -+ 4. ldapsearch should not return any unknown attributes -+ """ -+ -+ log.info("Set nsslapd-search-return-original-type-switch to ON") -+ topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'on') -+ -+ log.info("Check that ldapsearch *does* return unknown attributes") -+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', -+ ['objectclass overflow', 'unknown']) -+ assert "objectclass overflow" in entries[0].getAttrs() -+ -+ log.info("Set nsslapd-search-return-original-type-switch to Off") -+ topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'off') -+ log.info("Check that ldapsearch *does not* return unknown attributes") -+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', -+ ['objectclass overflow', 'unknown']) -+ assert "objectclass overflow" not in entries[0].getAttrs() -+ -+ -+@pytest.mark.bz192901 -+def test_search_ou(topology_st): -+ """Test that DS should not return an entry that does not match the filter -+ -+ :id: d7831d05-f117-4e89-93c7-b6f77109f640 -+ :setup: Standalone instance -+ :steps: -+ 1. Create an OU entry without sub entries -+ 2. Search from the OU with the filter that does not match the OU -+ :expectedresults: -+ 1. Creation of OU should be successful -+ 2. 
Search should not return any results -+ """ -+ -+ log.info("Create a test OU without sub entries") -+ ou = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) -+ ou.create(properties={ -+ 'ou': 'test_ou', -+ }) -+ -+ search_base = ("ou=test_ou,%s" % DEFAULT_SUFFIX) -+ log.info("Search from the OU with the filter that does not match the OU, it should not return anything") -+ entries = topology_st.standalone.search_s(search_base, ldap.SCOPE_SUBTREE, 'uid=*', ['dn']) -+ assert len(entries) == 0 -+ -+ -+@pytest.mark.bz1044135 -+@pytest.mark.ds47319 -+def test_connection_buffer_size(topology_st): -+ """Test connection buffer size adjustable with different values(valid values and invalid) -+ -+ :id: e7831d05-f117-4ec9-1203-b6f77109f117 -+ :setup: Standalone instance -+ :steps: -+ 1. Set nsslapd-connection-buffer to some valid values (2, 0 , 1) -+ 2. Set nsslapd-connection-buffer to some invalid values (-1, a) -+ :expectedresults: -+ 1. This should pass -+ 2. This should fail -+ """ -+ -+ valid_values = ['2', '0', '1'] -+ for value in valid_values: -+ topology_st.standalone.config.replace('nsslapd-connection-buffer', value) -+ -+ invalid_values = ['-1', 'a'] -+ for value in invalid_values: -+ with pytest.raises(ldap.OPERATIONS_ERROR): -+ topology_st.standalone.config.replace('nsslapd-connection-buffer', value) -+ -+@pytest.mark.bz1637439 -+def test_critical_msg_on_empty_range_idl(topology_st): -+ """Doing a range index lookup should not report a critical message even if IDL is empty -+ -+ :id: a07a2222-0551-44a6-b113-401d23799364 -+ :setup: Standalone instance -+ :steps: -+ 1. Create an index for internationalISDNNumber. (attribute chosen because it is -+ unlikely that previous tests used it) -+ 2. telephoneNumber being indexed by default create 20 users without telephoneNumber -+ 3. add a telephoneNumber value and delete it to trigger an empty index database -+ 4. Do a search that triggers a range lookup on empty telephoneNumber -+ 5. 
Check that the critical message is not logged in error logs -+ :expectedresults: -+ 1. This should pass -+ 2. This should pass -+ 3. This should pass -+ 4. This should pass on normal build but could abort a debug build -+ 4. This should pass -+ """ -+ indexedAttr = 'internationalISDNNumber' -+ -+ # Step 1 -+ from lib389.index import Indexes -+ -+ indexes = Indexes(topology_st.standalone) -+ indexes.create(properties={ -+ 'cn': indexedAttr, -+ 'nsSystemIndex': 'false', -+ 'nsIndexType': 'eq' -+ }) -+ topology_st.standalone.restart() -+ -+ # Step 2 -+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) -+ log.info('Adding 20 users without "%s"' % indexedAttr) -+ for i in range(20): -+ name = 'user_%d' % i -+ last_user = users.create(properties={ -+ 'uid': name, -+ 'sn': name, -+ 'cn': name, -+ 'uidNumber': '1000', -+ 'gidNumber': '1000', -+ 'homeDirectory': '/home/%s' % name, -+ 'mail': '%s@example.com' % name, -+ 'userpassword': 'pass%s' % name, -+ }) -+ -+ # Step 3 -+ # required update to create the indexAttr (i.e. 
'loginShell') database, and then make it empty -+ topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_ADD, indexedAttr, b'1234')]) -+ ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) -+ assert ent -+ assert ent.hasAttr(indexedAttr) -+ topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_DELETE, indexedAttr, None)]) -+ ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) -+ assert ent -+ assert not ent.hasAttr(indexedAttr) -+ -+ # Step 4 -+ # The first component being not indexed the range on second is evaluated -+ try: -+ ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(&(sudoNotAfter=*)(%s>=111))' % indexedAttr) -+ assert len(ents) == 0 -+ except ldap.SERVER_DOWN: -+ log.error('Likely testing against a debug version that asserted') -+ pass -+ -+ # Step 5 -+ assert not topology_st.standalone.searchErrorsLog('CRIT - list_candidates - NULL idl was recieved from filter_candidates_ext.') -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c -index 6d36ba33e..3ef04f884 100644 ---- a/ldap/servers/slapd/back-ldbm/filterindex.c -+++ b/ldap/servers/slapd/back-ldbm/filterindex.c -@@ -803,16 +803,10 @@ list_candidates( - } - - /* -- * Assert we recieved a valid idl. If it was NULL, it means somewhere we failed -- * during the dblayer interactions. -- * -- * idl_set requires a valid idl structure to generate the linked list of -- * idls that we insert. -+ * The IDL for that component is NULL, so no candidate retrieved from that component. This is all normal -+ * Just build a idl with an empty set - */ - if (tmp == NULL) { -- slapi_log_err(SLAPI_LOG_CRIT, "list_candidates", "NULL idl was recieved from filter_candidates_ext."); -- slapi_log_err(SLAPI_LOG_CRIT, "list_candidates", "Falling back to empty IDL set. 
This may affect your search results."); -- PR_ASSERT(tmp); - tmp = idl_alloc(0); - } - --- -2.17.2 - diff --git a/SOURCES/0010-Ticket-50260-Invalid-cache-flushing-improvements.patch b/SOURCES/0010-Ticket-50260-Invalid-cache-flushing-improvements.patch new file mode 100644 index 0000000..969ccb0 --- /dev/null +++ b/SOURCES/0010-Ticket-50260-Invalid-cache-flushing-improvements.patch @@ -0,0 +1,290 @@ +From e07605531f978f6767c9b1cc947c0012ff6c83e3 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Sun, 17 Mar 2019 13:09:07 -0400 +Subject: [PATCH 3/4] Ticket 50260 - Invalid cache flushing improvements + +Description: The original version of the fix only checked if backend + transaction "post" operation plugins failed, but it did + not check for errors from the backend transaction "pre" + operation plugin. To address this we flush invalid + entries whenever any error occurs. + + We were also not flushing invalid cache entries when + modrdn errors occurred. Modrdns only make changes to + the DN hashtable inside the entry cache, but we were only + checking the ID hashtable. So we also need to check the + DN hashtable in the entry cache for invalid entries. + +https://pagure.io/389-ds-base/issue/50260 + +Reviewed by: firstyear & tbordaz(Thanks!!) 
+ +(cherry picked from commit 33fbced25277b88695bfba7262e606380e9d891f) +--- + dirsrvtests/tests/suites/betxns/betxn_test.py | 23 ++++++---- + ldap/servers/slapd/back-ldbm/cache.c | 42 ++++++++++++++++++- + ldap/servers/slapd/back-ldbm/ldbm_add.c | 9 ++-- + ldap/servers/slapd/back-ldbm/ldbm_delete.c | 11 ++--- + ldap/servers/slapd/back-ldbm/ldbm_modify.c | 10 ++--- + ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 10 ++--- + 6 files changed, 74 insertions(+), 31 deletions(-) + +diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py +index f03fb93cc..84f5e2087 100644 +--- a/dirsrvtests/tests/suites/betxns/betxn_test.py ++++ b/dirsrvtests/tests/suites/betxns/betxn_test.py +@@ -86,9 +86,7 @@ def test_betxt_7bit(topology_st, dynamic_plugins): + log.fatal('Error while searching for test entry: ' + e.message['desc']) + assert False + +- # + # Cleanup - remove the user +- # + try: + topology_st.standalone.delete_s(USER_DN) + except ldap.LDAPError as e: +@@ -241,14 +239,15 @@ def test_betxn_memberof(topology_st, dynamic_plugins): + except ldap.LDAPError as e: + log.info('test_betxn_memberof: Group2 was correctly rejected (mod add): error ' + e.message['desc']) + +- # ++ # verify entry cache reflects the current/correct state of group1 ++ assert not group1.is_member(group2.dn) ++ + # Done +- # + log.info('test_betxn_memberof: PASSED') + + + def test_betxn_modrdn_memberof_cache_corruption(topology_st): +- """Test modrdn operations and memberOf ++ """Test modrdn operations and memberOf be txn post op failures + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 + +@@ -297,9 +296,7 @@ def test_betxn_modrdn_memberof_cache_corruption(topology_st): + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group.rename('cn=group_to_people', newsuperior=peoplebase) + +- # + # Done +- # + log.info('test_betxn_modrdn_memberof: PASSED') + + +@@ -374,15 +371,23 @@ def test_ri_and_mep_cache_corruption(topology_st): + log.fatal("MEP group was not 
created for the user") + assert False + ++ # Test MEP be txn pre op failure does not corrupt entry cache ++ # Should get the same exception for both rename attempts ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ mep_group.rename("cn=modrdn group") ++ ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ mep_group.rename("cn=modrdn group") ++ + # Mess with MEP so it fails + mep_plugin.disable() + mep_group.delete() + mep_plugin.enable() + +- # Add another group for verify entry cache is not corrupted ++ # Add another group to verify entry cache is not corrupted + test_group = groups.create(properties={'cn': 'test_group'}) + +- # Delete user, should fail, and user should still be a member ++ # Delete user, should fail in MEP be txn post op, and user should still be a member + with pytest.raises(ldap.NO_SUCH_OBJECT): + user.delete() + +diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c +index 458d7912f..02453abac 100644 +--- a/ldap/servers/slapd/back-ldbm/cache.c ++++ b/ldap/servers/slapd/back-ldbm/cache.c +@@ -517,7 +517,8 @@ flush_remove_entry(struct timespec *entry_time, struct timespec *start_time) + /* + * Flush all the cache entries that were added after the "start time" + * This is called when a backend transaction plugin fails, and we need +- * to remove all the possible invalid entries in the cache. ++ * to remove all the possible invalid entries in the cache. We need ++ * to check both the ID and DN hashtables when checking the entry cache. + * + * If the ref count is 0, we can straight up remove it from the cache, but + * if the ref count is greater than 1, then the entry is currently in use. 
+@@ -528,8 +529,8 @@ flush_remove_entry(struct timespec *entry_time, struct timespec *start_time) + static void + flush_hash(struct cache *cache, struct timespec *start_time, int32_t type) + { ++ Hashtable *ht = cache->c_idtable; /* start with the ID table as it's in both ENTRY and DN caches */ + void *e, *laste = NULL; +- Hashtable *ht = cache->c_idtable; + + cache_lock(cache); + +@@ -570,6 +571,43 @@ flush_hash(struct cache *cache, struct timespec *start_time, int32_t type) + } + } + ++ if (type == ENTRY_CACHE) { ++ /* Also check the DN hashtable */ ++ ht = cache->c_dntable; ++ ++ for (size_t i = 0; i < ht->size; i++) { ++ e = ht->slot[i]; ++ while (e) { ++ struct backcommon *entry = (struct backcommon *)e; ++ uint64_t remove_it = 0; ++ if (flush_remove_entry(&entry->ep_create_time, start_time)) { ++ /* Mark the entry to be removed */ ++ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", "[ENTRY CACHE] Removing entry id (%d)\n", ++ entry->ep_id); ++ remove_it = 1; ++ } ++ laste = e; ++ e = HASH_NEXT(ht, e); ++ ++ if (remove_it) { ++ /* since we have the cache lock we know we can trust refcnt */ ++ entry->ep_state |= ENTRY_STATE_INVALID; ++ if (entry->ep_refcnt == 0) { ++ entry->ep_refcnt++; ++ lru_delete(cache, laste); ++ entrycache_remove_int(cache, laste); ++ entrycache_return(cache, (struct backentry **)&laste); ++ } else { ++ /* Entry flagged for removal */ ++ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", ++ "[ENTRY CACHE] Flagging entry to be removed later: id (%d) refcnt: %d\n", ++ entry->ep_id, entry->ep_refcnt); ++ } ++ } ++ } ++ } ++ } ++ + cache_unlock(cache); + } + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c +index aa5b59aea..264f0ceea 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c +@@ -1221,11 +1221,6 @@ ldbm_back_add(Slapi_PBlock *pb) + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? 
&ldap_result_code : &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); +- +- /* Revert the caches if this is the parent operation */ +- if (parent_op) { +- revert_cache(inst, &parent_time); +- } + goto error_return; + } + +@@ -1253,6 +1248,10 @@ ldbm_back_add(Slapi_PBlock *pb) + goto common_return; + + error_return: ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } + if (addingentry_id_assigned) { + next_id_return(be, addingentry->ep_id); + } +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c +index 3f687eb91..1ad846447 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c +@@ -1279,11 +1279,6 @@ replace_entry: + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); +- +- /* Revert the caches if this is the parent operation */ +- if (parent_op) { +- revert_cache(inst, &parent_time); +- } + goto error_return; + } + if (parent_found) { +@@ -1370,6 +1365,11 @@ commit_return: + goto common_return; + + error_return: ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } ++ + if (tombstone) { + if (cache_is_in_cache(&inst->inst_cache, tombstone)) { + tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. 
*/ +@@ -1388,6 +1388,7 @@ error_return: + CACHE_RETURN(&inst->inst_cache, &tombstone); + tombstone = NULL; + } ++ + if (retval == DB_RUNRECOVERY) { + dblayer_remember_disk_filled(li); + ldbm_nasty("ldbm_back_delete", "Delete", 79, retval); +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c +index b90b3e0f0..b0c477e3f 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c +@@ -873,11 +873,6 @@ ldbm_back_modify(Slapi_PBlock *pb) + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); +- +- /* Revert the caches if this is the parent operation */ +- if (parent_op) { +- revert_cache(inst, &parent_time); +- } + goto error_return; + } + retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN); +@@ -901,6 +896,11 @@ ldbm_back_modify(Slapi_PBlock *pb) + goto common_return; + + error_return: ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } ++ + if (postentry != NULL) { + slapi_entry_free(postentry); + postentry = NULL; +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +index 73e50ebcc..65610d613 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +@@ -1217,11 +1217,6 @@ ldbm_back_modrdn(Slapi_PBlock *pb) + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? 
&ldap_result_code : &retval); + } + slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); +- +- /* Revert the caches if this is the parent operation */ +- if (parent_op) { +- revert_cache(inst, &parent_time); +- } + goto error_return; + } + retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); +@@ -1290,6 +1285,11 @@ ldbm_back_modrdn(Slapi_PBlock *pb) + goto common_return; + + error_return: ++ /* Revert the caches if this is the parent operation */ ++ if (parent_op) { ++ revert_cache(inst, &parent_time); ++ } ++ + /* result already sent above - just free stuff */ + if (postentry) { + slapi_entry_free(postentry); +-- +2.17.2 + diff --git a/SOURCES/0011-Ticket-49967-entry-cache-corruption-after-failed-MOD.patch b/SOURCES/0011-Ticket-49967-entry-cache-corruption-after-failed-MOD.patch deleted file mode 100644 index a4f8cdb..0000000 --- a/SOURCES/0011-Ticket-49967-entry-cache-corruption-after-failed-MOD.patch +++ /dev/null @@ -1,62 +0,0 @@ -From a7ee52bd4b0bce82402a581ee16659ebb2f8d96e Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Wed, 24 Oct 2018 15:31:25 +0200 -Subject: [PATCH 1/2] Ticket 49967 - entry cache corruption after failed MODRDN - -Bug Description: - During a MODRDN the DN cache is updated to replace - source DN with the target DN (modrdn_rename_entry_update_indexes) - If later a failure occurs (for example if BETXN_POSTOP fails) and - the txn is aborted, the target DN (for the specific entryID) remains - in the DN cache. - - If the entry is returned in a search, to build the DN there is - a lookup of the DN cache with the entryID. 
It retrieves the target DN - rather than the source DN - -Fix Description: - In case of failure of the operation, the entry (from the entryID) - need to be cleared from the DN cache - -https://pagure.io/389-ds-base/issue/49967 - -Reviewed by: Mark Reynolds - -Platforms tested: F27 - -Flag Day: no - -Doc impact: no - -(cherry picked from commit ab4af68ef49fcdc5f2f6d0c1f5c7b9a5333b1bee) ---- - ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -index 71e2a8fe0..e2e9d1b46 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -@@ -1400,6 +1400,19 @@ common_return: - } - } - } -+ -+ if (ec && retval) { -+ /* if the operation failed, the destination entry does not exist -+ * but it has been added in dncache during cache_add_tentative -+ * we need to remove it. Else a retrieval from ep_id can give the wrong DN -+ */ -+ struct backdn *bdn = dncache_find_id(&inst->inst_dncache, ec->ep_id); -+ slapi_log_err(SLAPI_LOG_CACHE, "ldbm_back_modrdn", -+ "operation failed, the target entry is cleared from dncache (%s)\n", slapi_entry_get_dn(ec->ep_entry)); -+ CACHE_REMOVE(&inst->inst_dncache, bdn); -+ CACHE_RETURN(&inst->inst_dncache, &bdn); -+ } -+ - /* remove the new entry from the cache if the op failed - - otherwise, leave it in */ - if (ec && inst) { --- -2.17.2 - diff --git a/SOURCES/0011-Ticket-50265-the-warning-about-skew-time-could-last-.patch b/SOURCES/0011-Ticket-50265-the-warning-about-skew-time-could-last-.patch new file mode 100644 index 0000000..de141e1 --- /dev/null +++ b/SOURCES/0011-Ticket-50265-the-warning-about-skew-time-could-last-.patch @@ -0,0 +1,58 @@ +From d56be1addb6f2a696f59e8971d0874a3e0d80ec7 Mon Sep 17 00:00:00 2001 +From: Ludwig Krispenz +Date: Wed, 20 Mar 2019 12:00:42 +0100 +Subject: [PATCH 4/4] Ticket 50265: the warning about skew time could last + 
forever + +Bug: if the local system time is set back more than 300 seconds + a worning about too much time skew is logged and the sampled + time is updated. This adjustment is done at every write operation + and can increase the time skew and be logged infinitely + +Fix: the intention of the adjustment was to avoid a roll over of seq_num + if the sampled time is not increased for more than 65k oberations. + But this is already handled with an explicite check for seq_num + rollover. The extra adjustment for negative time skew can be removed. + +Reviewed by: Thierry, William. Thanks. +--- + ldap/servers/slapd/csngen.c | 22 +++++++--------------- + 1 file changed, 7 insertions(+), 15 deletions(-) + +diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c +index 3afc9176b..68dbbda8e 100644 +--- a/ldap/servers/slapd/csngen.c ++++ b/ldap/servers/slapd/csngen.c +@@ -191,22 +191,14 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify) + slapi_rwlock_unlock(gen->lock); + return rc; + } +- } else if (delta < -300) { +- /* +- * The maxseqnum could support up to 65535 CSNs per second. +- * That means that we could avoid duplicated CSN's for +- * delta up to 300 secs if update rate is 200/sec (usually +- * the max rate is below 20/sec). +- * Beyond 300 secs, we advance gen->state.sampled_time by +- * one sec to recycle seqnum. +- */ +- slapi_log_err(SLAPI_LOG_WARNING, "csngen_new_csn", "Too much time skew (%d secs). Current seqnum=%0x\n", delta, gen->state.seq_num); +- rc = _csngen_adjust_local_time(gen, gen->state.sampled_time + 1); +- if (rc != CSN_SUCCESS) { +- slapi_rwlock_unlock(gen->lock); +- return rc; +- } + } ++ /* if (delta < 0) this means the local system time was set back ++ * the new csn will be generated based on sampled time, which is ++ * ahead of system time and previously generated csns. ++ * the time stamp of the csn will not change until system time ++ * catches up or is corrected by remote csns. 
++ * But we need to ensure that the seq_num does not overflow. ++ */ + + if (gen->state.seq_num == CSN_MAX_SEQNUM) { + slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", "Sequence rollover; " +-- +2.17.2 + diff --git a/SOURCES/0012-Ticket-49958-extended-search-fail-to-match-entries.patch b/SOURCES/0012-Ticket-49958-extended-search-fail-to-match-entries.patch deleted file mode 100644 index dcc9fa4..0000000 --- a/SOURCES/0012-Ticket-49958-extended-search-fail-to-match-entries.patch +++ /dev/null @@ -1,264 +0,0 @@ -From 02de69d6987b059459980b50de285c2fd7bb3e2c Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Mon, 24 Sep 2018 14:14:16 +0200 -Subject: [PATCH] Ticket 49958: extended search fail to match entries - -Bug Description: - During an extended search, a structure is created for each filter component. - The structure contains the keys generated from the assertion and using the given - matching rule indexer. - Later the keys will be compared (with the MR) with keys generated from the - attribute values of the candidate entries. - The bug is that parsing the assertion, instead of removing the heading spaces - the routine clear the assertion that is empty. So the generated keys is NULL. 
- -Fix Description: - The fix consists to only remove heading spaces - -https://pagure.io/389-ds-base/issue/49958 - -Reviewed by: Mark Reynolds - -Platforms tested: F27 - -Flag Day: no - -Doc impact: no ---- - .../tests/suites/filter/filter_test.py | 203 ++++++++++++++++++ - ldap/servers/plugins/collation/orfilter.c | 4 +- - 2 files changed, 204 insertions(+), 3 deletions(-) - -diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py -index 280db68a3..61c449989 100644 ---- a/dirsrvtests/tests/suites/filter/filter_test.py -+++ b/dirsrvtests/tests/suites/filter/filter_test.py -@@ -83,6 +83,209 @@ def test_filter_search_original_attrs(topology_st): - log.info('test_filter_search_original_attrs: PASSED') - - -+@pytest.mark.bz1511462 -+def test_filter_scope_one(topology_st): -+ """Test ldapsearch with scope one gives only single entry -+ -+ :id: cf5a6078-bbe6-4d43-ac71-553c45923f91 -+ :setup: Standalone instance -+ :steps: -+ 1. Search cn=Directory Administrators,dc=example,dc=com using ldapsearch with -+ scope one using base as dc=example,dc=com -+ 2. Check that search should return only one entry -+ :expectedresults: -+ 1. This should pass -+ 2. This should pass -+ """ -+ -+ parent_dn="dn: dc=example,dc=com" -+ child_dn="dn: cn=Directory Administrators,dc=example,dc=com" -+ -+ log.info('Search user using ldapsearch with scope one') -+ results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL,'cn=Directory Administrators',['cn'] ) -+ log.info(results) -+ -+ log.info('Search should only have one entry') -+ assert len(results) == 1 -+ -+@pytest.mark.ds47313 -+def test_filter_with_attribute_subtype(topology_st): -+ """Adds 2 test entries and Search with -+ filters including subtype and ! -+ -+ :id: 0e69f5f2-6a0a-480e-8282-fbcc50231908 -+ :setup: Standalone instance -+ :steps: -+ 1. Add 2 entries and create 3 filters -+ 2. 
Search for entry with filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) -+ 3. Search for entry with filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) -+ 4. Search for entry with filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) -+ 5. Delete the added entries -+ :expectedresults: -+ 1. Operation should be successful -+ 2. Search should be successful -+ 3. Search should be successful -+ 4. Search should not be successful -+ 5. Delete the added entries -+ """ -+ -+ # bind as directory manager -+ topology_st.standalone.log.info("Bind as %s" % DN_DM) -+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) -+ -+ # enable filter error logging -+ # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] -+ # topology_st.standalone.modify_s(DN_CONFIG, mod) -+ -+ topology_st.standalone.log.info("\n\n######################### ADD ######################\n") -+ -+ # Prepare the entry with cn;fr & cn;en -+ entry_name_fr = '%s fr' % (ENTRY_NAME) -+ entry_name_en = '%s en' % (ENTRY_NAME) -+ entry_name_both = '%s both' % (ENTRY_NAME) -+ entry_dn_both = 'cn=%s, %s' % (entry_name_both, SUFFIX) -+ entry_both = Entry(entry_dn_both) -+ entry_both.setValues('objectclass', 'top', 'person') -+ entry_both.setValues('sn', entry_name_both) -+ entry_both.setValues('cn', entry_name_both) -+ entry_both.setValues('cn;fr', entry_name_fr) -+ entry_both.setValues('cn;en', entry_name_en) -+ -+ # Prepare the entry with one member -+ entry_name_en_only = '%s en only' % (ENTRY_NAME) -+ entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) -+ entry_en_only = Entry(entry_dn_en_only) -+ entry_en_only.setValues('objectclass', 'top', 'person') -+ entry_en_only.setValues('sn', entry_name_en_only) -+ entry_en_only.setValues('cn', entry_name_en_only) -+ entry_en_only.setValues('cn;en', entry_name_en) -+ -+ topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) -+ topology_st.standalone.add_s(entry_both) -+ -+ 
topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) -+ topology_st.standalone.add_s(entry_en_only) -+ -+ topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n") -+ -+ # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) -+ myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 1 -+ assert ensure_str(ents[0].sn) == entry_name_en_only -+ topology_st.standalone.log.info("Found %s" % ents[0].dn) -+ -+ # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) -+ myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 1 -+ assert ensure_str(ents[0].sn) == entry_name_en_only -+ topology_st.standalone.log.info("Found %s" % ents[0].dn) -+ -+ # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) -+ myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 0 -+ topology_st.standalone.log.info("Found none") -+ -+ topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") -+ -+ topology_st.standalone.log.info("Try to delete %s " % entry_dn_both) -+ topology_st.standalone.delete_s(entry_dn_both) -+ -+ topology_st.standalone.log.info("Try to delete %s " % entry_dn_en_only) -+ topology_st.standalone.delete_s(entry_dn_en_only) -+ -+ log.info('Testcase PASSED') -+ -+ -+@pytest.mark.bz1615155 -+def test_extended_search(topology_st): -+ """Test we can search with 
equality extended matching rule -+ -+ :id: -+ :setup: Standalone instance -+ :steps: -+ 1. Add a test user with 'sn: ext-test-entry' -+ 2. Search '(cn:de:=ext-test-entry)' -+ 3. Search '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' -+ 4. Search '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' -+ 5. Search '(sn:caseExactMatch:=EXT-TEST-ENTRY)' -+ 6. Search '(sn:caseExactMatch:=ext-test-entry)' -+ 7. Search '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' -+ 8. Search '(sn:caseExactIA5Match:=ext-test-entry)' -+ :expectedresults: -+ 1. This should pass -+ 2. This should return one entry -+ 3. This should return one entry -+ 4. This should return one entry -+ 5. This should return NO entry -+ 6. This should return one entry -+ 7. This should return NO entry -+ 8. This should return one entry -+ 3. return one entry -+ """ -+ log.info('Running test_filter_escaped...') -+ -+ ATTR_VAL = 'ext-test-entry' -+ USER1_DN = "uid=%s,%s" % (ATTR_VAL, DEFAULT_SUFFIX) -+ -+ try: -+ topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), -+ 'sn': ATTR_VAL.encode(), -+ 'cn': ATTR_VAL.encode(), -+ 'uid': ATTR_VAL.encode()}))) -+ except ldap.LDAPError as e: -+ log.fatal('test_extended_search: Failed to add test user ' + USER1_DN + ': error ' + -+ e.message['desc']) -+ assert False -+ -+ # filter: '(cn:de:=ext-test-entry)' -+ myfilter = '(cn:de:=%s)' % ATTR_VAL -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 1 -+ -+ # filter: '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' -+ myfilter = '(cn:caseIgnoreIA5Match:=%s)' % ATTR_VAL.upper() -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 1 -+ -+ # filter: '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' -+ myfilter = '(cn:caseIgnoreMatch:=%s)' % ATTR_VAL.upper() -+ 
topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 1 -+ -+ # filter: '(sn:caseExactMatch:=EXT-TEST-ENTRY)' -+ myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL.upper() -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 0 -+ -+ # filter: '(sn:caseExactMatch:=ext-test-entry)' -+ myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 1 -+ -+ # filter: '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' -+ myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL.upper() -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 0 -+ -+ # filter: '(sn:caseExactIA5Match:=ext-test-entry)' -+ myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL -+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) -+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) -+ assert len(ents) == 1 -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/plugins/collation/orfilter.c b/ldap/servers/plugins/collation/orfilter.c -index 7705de9d6..c092d77ca 100644 ---- a/ldap/servers/plugins/collation/orfilter.c -+++ b/ldap/servers/plugins/collation/orfilter.c -@@ -531,10 +531,8 @@ or_filter_create(Slapi_PBlock *pb) - default: - break; - } -- for (; len > 0 && *val != ' '; ++val, --len) -+ for (; len > 0 && *val == ' '; ++val, --len) - ; -- if (len > 0) -- ++val, --len; /* skip the space */ - bv.bv_len = len; - bv.bv_val = (len > 0) ? 
val : NULL; - } else { /* mrOID does not identify an ordering rule. */ --- -2.17.2 - diff --git a/SOURCES/0012-Ticket-50063-Crash-after-attempting-to-restore-a-sin.patch b/SOURCES/0012-Ticket-50063-Crash-after-attempting-to-restore-a-sin.patch new file mode 100644 index 0000000..d85a631 --- /dev/null +++ b/SOURCES/0012-Ticket-50063-Crash-after-attempting-to-restore-a-sin.patch @@ -0,0 +1,39 @@ +From ed430bcca3e1cf0788fc15786002b0b1c31a130b Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 30 Nov 2018 11:45:39 -0500 +Subject: [PATCH] Ticket 50063 - Crash after attempting to restore a single + backend + +Description: While we do not support backup/restore of individual backends, + it should not crash the server either. PR_OpenDir will crash + if the file name is NULL, so this fix just prevents the crash + by returning an error if the filename is NULL. + +https://pagure.io/389-ds-base/issue/50063 + +Reviewed by: firstyear & tbordaz(Thanks!!) + +(cherry picked from commit d36f796a7b35bade5a05e197690abf4e49d212ce) +--- + ldap/servers/slapd/back-ldbm/dblayer.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c +index fa931ccbf..64a4e9e4f 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.c ++++ b/ldap/servers/slapd/back-ldbm/dblayer.c +@@ -5292,6 +5292,11 @@ dblayer_delete_database_ex(struct ldbminfo *li, char *instance, char *cldir) + } + + /* now smash everything else in the db/ dir */ ++ if (priv->dblayer_home_directory == NULL){ ++ slapi_log_err(SLAPI_LOG_ERR, "dblayer_delete_database_ex", ++ "dblayer_home_directory is NULL, can not proceed\n"); ++ return -1; ++ } + dirhandle = PR_OpenDir(priv->dblayer_home_directory); + if (!dirhandle) { + slapi_log_err(SLAPI_LOG_ERR, "dblayer_delete_database_ex", "PR_OpenDir (%s) failed (%d): %s\n", +-- +2.17.2 + diff --git a/SOURCES/0013-Ticket-49915-Master-ns-slapd-had-100-CPU-usage-after.patch 
b/SOURCES/0013-Ticket-49915-Master-ns-slapd-had-100-CPU-usage-after.patch deleted file mode 100644 index 34d9d01..0000000 --- a/SOURCES/0013-Ticket-49915-Master-ns-slapd-had-100-CPU-usage-after.patch +++ /dev/null @@ -1,354 +0,0 @@ -From 6d67faa0de58cb0b66fc72d43f24b1c9669f88f8 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Mon, 3 Sep 2018 15:36:52 +0200 -Subject: [PATCH] Ticket 49915 - Master ns-slapd had 100% CPU usage after - starting replication and replication cannot finish - -Bug Description: - During a total initialization the supplier builds a candidate list of the entries to send. - Because of https://fedorahosted.org/389/ticket/48755, the candidate list relies on parentid attribute. - All entries, except tombstones and suffix itself, have parentid. - There is an assumption that the first found key (i.e. '=1') contains the suffix children. - So when it finally finds the suffix key it adds its children to a leftover list rather to the candidate list. - Later idl_new_range_fetch loops for ever trying to add suffix children from leftover to candidate list. - -Fix Description: - The fix consist to store the suffix_id (if it does not exist already) in the parentid index (with the key '=0'). - Then get it to detect the suffix key from the index in idl_new_range_fetch. - -https://pagure.io/389-ds-base/issue/49915 - -Reviewed by: Ludwig Krispenz, William Brown (thanks !) 
- -Platforms tested: F27 - -Flag Day: no - -Doc impact: no ---- - .../plugins/replication/repl5_tot_protocol.c | 48 ++++++++ - ldap/servers/slapd/back-ldbm/dblayer.c | 8 ++ - ldap/servers/slapd/back-ldbm/idl_new.c | 34 +++++- - ldap/servers/slapd/back-ldbm/index.c | 114 ++++++++++++++++++ - ldap/servers/slapd/slapi-plugin.h | 10 +- - 5 files changed, 209 insertions(+), 5 deletions(-) - -diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c -index ee3c9dcb0..1dbbe694f 100644 ---- a/ldap/servers/plugins/replication/repl5_tot_protocol.c -+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c -@@ -283,6 +283,53 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) - } - } - -+/* This routine checks that the entry id of the suffix is -+ * stored in the parentid index -+ * The entry id of the suffix is stored with the equality key 0 (i.e. '=0') -+ * It first checks if the key '=0' exists. If it does not exists or if the first value -+ * stored with that key, does not match the suffix entryid (stored in the suffix entry -+ * from id2entry.db then it updates the value -+ */ -+static void -+check_suffix_entryID(Slapi_Backend *be, Slapi_Entry *suffix) -+{ -+ u_int32_t entryid; -+ char *entryid_str; -+ struct _back_info_index_key bck_info; -+ -+ /* we are using a specific key in parentid to store the suffix entry id: '=0' */ -+ bck_info.index = SLAPI_ATTR_PARENTID; -+ bck_info.key = "0"; -+ -+ /* First try to retrieve from parentid index the suffix entryID */ -+ if (slapi_back_get_info(be, BACK_INFO_INDEX_KEY, (void **) &bck_info)) { -+ slapi_log_err(SLAPI_LOG_REPL, "check_suffix_entryID", "Total update: fail to retrieve suffix entryID. 
Let's try to write it\n"); -+ } -+ -+ /* Second retrieve the suffix entryid from the suffix entry itself */ -+ entryid_str = slapi_entry_attr_get_charptr(suffix, "entryid"); -+ if (entryid_str == NULL) { -+ char *dn; -+ dn = slapi_entry_get_ndn(suffix); -+ slapi_log_err(SLAPI_LOG_ERR, "check_suffix_entryID", "Unable to retrieve entryid of the suffix entry %s\n", dn ? dn : ""); -+ slapi_ch_free_string(&entryid_str); -+ return; -+ } -+ entryid = (u_int32_t) atoi(entryid_str); -+ slapi_ch_free_string(&entryid_str); -+ -+ if (!bck_info.key_found || bck_info.id != entryid) { -+ /* The suffix entryid is not present in parentid index -+ * or differs from what is in id2entry (entry 'suffix') -+ * So write it to the parentid so that the range index used -+ * during total init will know the entryid of the suffix -+ */ -+ bck_info.id = entryid; -+ if (slapi_back_set_info(be, BACK_INFO_INDEX_KEY, (void **) &bck_info)) { -+ slapi_log_err(SLAPI_LOG_ERR, "check_suffix_entryID", "Total update: fail to register suffix entryid, continue assuming suffix is the first entry\n"); -+ } -+ } -+} - - /* - * Completely refresh a replica. The basic protocol interaction goes -@@ -467,6 +514,7 @@ retry: - replica_subentry_check(area_sdn, rid); - - /* Send the subtree of the suffix in the order of parentid index plus ldapsubentry and nstombstone. 
*/ -+ check_suffix_entryID(be, suffix); - slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(area_sdn), - LDAP_SCOPE_SUBTREE, "(parentid>=1)", NULL, 0, ctrls, NULL, - repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), OP_FLAG_BULK_IMPORT); -diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c -index e84cb7695..fa931ccbf 100644 ---- a/ldap/servers/slapd/back-ldbm/dblayer.c -+++ b/ldap/servers/slapd/back-ldbm/dblayer.c -@@ -7295,6 +7295,10 @@ ldbm_back_get_info(Slapi_Backend *be, int cmd, void **info) - *(int *)info = entryrdn_get_switch(); - break; - } -+ case BACK_INFO_INDEX_KEY : { -+ rc = get_suffix_key(be, (struct _back_info_index_key *)info); -+ break; -+ } - default: - break; - } -@@ -7311,6 +7315,10 @@ ldbm_back_set_info(Slapi_Backend *be, int cmd, void *info) - } - - switch (cmd) { -+ case BACK_INFO_INDEX_KEY : { -+ rc = set_suffix_key(be, (struct _back_info_index_key *)info); -+ break; -+ } - default: - break; - } -diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c -index 4e28e3fc2..102265c47 100644 ---- a/ldap/servers/slapd/back-ldbm/idl_new.c -+++ b/ldap/servers/slapd/back-ldbm/idl_new.c -@@ -320,6 +320,9 @@ typedef struct _range_id_pair - * In the total update (bulk import), an entry requires its ancestors already added. - * To guarantee it, the range search with parentid is used with setting the flag - * SLAPI_OP_RANGE_NO_IDL_SORT in operator. -+ * In bulk import the range search is parentid>=1 to retrieve all the entries -+ * But we need to order the IDL with the parents first => retrieve the suffix entry ID -+ * to store the children - * - * If the flag is set, - * 1. the IDList is not sorted by the ID. 
-@@ -366,6 +369,23 @@ idl_new_range_fetch( - if (NULL == flag_err) { - return NULL; - } -+ if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) { -+ struct _back_info_index_key bck_info; -+ int rc; -+ /* We are doing a bulk import -+ * try to retrieve the suffix entry id from the index -+ */ -+ -+ bck_info.index = SLAPI_ATTR_PARENTID; -+ bck_info.key = "0"; -+ -+ if (rc = slapi_back_get_info(be, BACK_INFO_INDEX_KEY, (void **)&bck_info)) { -+ slapi_log_err(SLAPI_LOG_WARNING, "idl_new_range_fetch", "Total update: fail to retrieve suffix entryID, continue assuming it is the first entry\n"); -+ } -+ if (bck_info.key_found) { -+ suffix = bck_info.id; -+ } -+ } - - if (NEW_IDL_NOOP == *flag_err) { - return NULL; -@@ -455,7 +475,7 @@ idl_new_range_fetch( - *flag_err = LDAP_TIMELIMIT_EXCEEDED; - goto error; - } -- if (operator&SLAPI_OP_RANGE_NO_IDL_SORT) { -+ if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) { - key = (ID)strtol((char *)cur_key.data + 1, (char **)NULL, 10); - } - while (PR_TRUE) { -@@ -487,9 +507,13 @@ idl_new_range_fetch( - /* note the last id read to check for dups */ - lastid = id; - /* we got another ID, add it to our IDL */ -- if (operator&SLAPI_OP_RANGE_NO_IDL_SORT) { -- if (count == 0) { -- /* First time. Keep the suffix ID. */ -+ if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) { -+ if ((count == 0) && (suffix == 0)) { -+ /* First time. Keep the suffix ID. 
-+ * note that 'suffix==0' mean we did not retrieve the suffix entry id -+ * from the parentid index (key '=0'), so let assume the first -+ * found entry is the one from the suffix -+ */ - suffix = key; - idl_rc = idl_append_extend(&idl, id); - } else if ((key == suffix) || idl_id_is_in_idlist(idl, key)) { -@@ -615,9 +639,11 @@ error: - } - if (operator&SLAPI_OP_RANGE_NO_IDL_SORT) { - size_t remaining = leftovercnt; -+ - while(remaining > 0) { - for (size_t i = 0; i < leftovercnt; i++) { - if (leftover[i].key > 0 && idl_id_is_in_idlist(idl, leftover[i].key) != 0) { -+ /* if the leftover key has its parent in the idl */ - idl_rc = idl_append_extend(&idl, leftover[i].id); - if (idl_rc) { - slapi_log_err(SLAPI_LOG_ERR, "idl_new_range_fetch", -diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c -index 222f64dff..dea6e9a3e 100644 ---- a/ldap/servers/slapd/back-ldbm/index.c -+++ b/ldap/servers/slapd/back-ldbm/index.c -@@ -1236,6 +1236,120 @@ error: - return ret; - } - -+/* This routine add in a given index (parentid) -+ * the key/value = '=0'/ -+ * Input: -+ * info->key contains the key to lookup (i.e. '0') -+ * info->index index name used to retrieve syntax and db file -+ * info->id the entryID of the suffix -+ */ -+int -+set_suffix_key(Slapi_Backend *be, struct _back_info_index_key *info) -+{ -+ struct ldbminfo *li; -+ int rc; -+ back_txn txn; -+ Slapi_Value *sv_key[2]; -+ Slapi_Value tmpval; -+ -+ if (info->index== NULL || info->key == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "set_suffix_key", "Invalid index %s or key %s\n", -+ info->index ? info->index : "NULL", -+ info->key ? 
info->key : "NULL"); -+ return -1; -+ } -+ -+ /* Start a txn */ -+ li = (struct ldbminfo *)be->be_database->plg_private; -+ dblayer_txn_init(li, &txn); -+ if (rc = dblayer_txn_begin(be, txn.back_txn_txn, &txn)) { -+ slapi_log_err(SLAPI_LOG_ERR, "set_suffix_key", "Fail to update %s index with %s/%d (key/ID): txn begin fails\n", -+ info->index, info->key, info->id); -+ return rc; -+ } -+ -+ sv_key[0] = &tmpval; -+ sv_key[1] = NULL; -+ slapi_value_init_string(sv_key[0], info->key); -+ -+ if (rc = index_addordel_values_sv(be, info->index, sv_key, NULL, info->id, BE_INDEX_ADD, &txn)) { -+ value_done(sv_key[0]); -+ dblayer_txn_abort(be, &txn); -+ slapi_log_err(SLAPI_LOG_ERR, "set_suffix_key", "Fail to update %s index with %s/%d (key/ID): index_addordel_values_sv fails\n", -+ info->index, info->key, info->id); -+ return rc; -+ } -+ -+ value_done(sv_key[0]); -+ if (rc = dblayer_txn_commit(be, &txn)) { -+ slapi_log_err(SLAPI_LOG_ERR, "set_suffix_key", "Fail to update %s index with %s/%d (key/ID): commit fails\n", -+ info->index, info->key, info->id); -+ return rc; -+ } -+ -+ return 0; -+} -+/* This routine retrieves from a given index (parentid) -+ * the key/value = '=0'/ -+ * Input: -+ * info->key contains the key to lookup (i.e. '0') -+ * info->index index name used to retrieve syntax and db file -+ * Output -+ * info->id It returns the first id that is found for the key. -+ * If the key is not found, or there is no value for the key -+ * it contains '0' -+ * info->key_found Boolean that says if the key leads to a valid ID in info->id -+ */ -+int -+get_suffix_key(Slapi_Backend *be, struct _back_info_index_key *info) -+{ -+ struct berval bv; -+ int err; -+ IDList *idl = NULL; -+ ID id; -+ int rc = 0; -+ -+ if (info->index== NULL || info->key == NULL) { -+ slapi_log_err(SLAPI_LOG_ERR, "get_suffix_key", "Invalid index %s or key %s\n", -+ info->index ? info->index : "NULL", -+ info->key ? 
info->key : "NULL"); -+ return -1; -+ } -+ -+ /* This is the key to retrieve */ -+ bv.bv_val = info->key; -+ bv.bv_len = strlen(bv.bv_val); -+ -+ /* Assuming we are not going to find the key*/ -+ info->key_found = PR_FALSE; -+ id = 0; -+ idl = index_read(be, info->index, indextype_EQUALITY, &bv, NULL, &err); -+ -+ if (idl == NULL) { -+ if (err != 0 && err != DB_NOTFOUND) { -+ slapi_log_err(SLAPI_LOG_ERR, "get_suffix_key", "Fail to read key %s (err=%d)\n", -+ info->key ? info->key : "NULL", -+ err); -+ rc = err; -+ } -+ } else { -+ /* info->key was found */ -+ id = idl_firstid(idl); -+ if (id != NOID) { -+ info->key_found = PR_TRUE; -+ } else { -+ /* there is no ID in that key, make it as it was not found */ -+ id = 0; -+ } -+ idl_free(&idl); -+ } -+ -+ /* now set the returned id */ -+ info->id = id; -+ -+ return rc; -+} -+ - IDList * - index_range_read_ext( - Slapi_PBlock *pb, -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index 0646cdfdd..4b75654e7 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -7763,9 +7763,17 @@ enum - BACK_INFO_CRYPT_DECRYPT_VALUE, /* Ctrl: clcrypt_decrypt_value */ - BACK_INFO_DIRECTORY, /* Get the directory path */ - BACK_INFO_LOG_DIRECTORY, /* Get the txn log directory */ -- BACK_INFO_IS_ENTRYRDN /* Get the flag for entryrdn */ -+ BACK_INFO_IS_ENTRYRDN, /* Get the flag for entryrdn */ -+ BACK_INFO_INDEX_KEY /* Get the status of a key in an index */ - }; - -+struct _back_info_index_key -+{ -+ char *index; /* input: name of the index (parentid) */ -+ char *key; /* input: searched key (0) with equality -> '=0' */ -+ PRBool key_found; /* output: TRUE if '=0' is found in the index */ -+ u_int32_t id; /* output: if key_found it is the first value (suffix entryID) */ -+}; - struct _back_info_crypt_init - { - char *dn; /* input -- entry to store nsSymmetricKey */ --- -2.17.2 - diff --git a/SOURCES/0013-Ticket-49946-upgrade-of-389-ds-base-could-remove-rep.patch 
b/SOURCES/0013-Ticket-49946-upgrade-of-389-ds-base-could-remove-rep.patch new file mode 100644 index 0000000..0204d33 --- /dev/null +++ b/SOURCES/0013-Ticket-49946-upgrade-of-389-ds-base-could-remove-rep.patch @@ -0,0 +1,40 @@ +From 11f8ecf946c0b56d7d49f7b46429d153d5c19ee1 Mon Sep 17 00:00:00 2001 +From: German Parente +Date: Wed, 10 Oct 2018 10:24:08 +0200 +Subject: [PATCH] Ticket #49946 upgrade of 389-ds-base could remove replication + agreements. + +Bug Description: + +when a replication agreement starts with "cn=->...", the upgrade is removing +the entry. + +Fix Description: + +a check is missing when re-building dse.ldif in "setup-ds.pl -u" that provoked this entry not to be re-added to the file. + +https://pagure.io/389-ds-base/issue/49946 + +Author: German Parente + +Review by: ??? +--- + ldap/admin/src/scripts/FileConn.pm | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/admin/src/scripts/FileConn.pm b/ldap/admin/src/scripts/FileConn.pm +index dcea70904..8a2a1afe6 100644 +--- a/ldap/admin/src/scripts/FileConn.pm ++++ b/ldap/admin/src/scripts/FileConn.pm +@@ -360,7 +360,7 @@ sub add { + return $self->write(); + } + +- if (exists($self->{$ndn})) { ++ if ($ndn && exists($self->{$ndn})) { + $self->setErrorCode(LDAP_ALREADY_EXISTS); + return 0; + } +-- +2.17.2 + diff --git a/SOURCES/0014-Ticket-49873-Contention-on-virtual-attribute-lookup.patch b/SOURCES/0014-Ticket-49873-Contention-on-virtual-attribute-lookup.patch new file mode 100644 index 0000000..7918e9e --- /dev/null +++ b/SOURCES/0014-Ticket-49873-Contention-on-virtual-attribute-lookup.patch @@ -0,0 +1,359 @@ +From 82b21b4b939acd4dfac8c061bf19ad2494680485 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Tue, 15 Jan 2019 11:13:42 +0100 +Subject: [PATCH] Ticket 49873 - Contention on virtual attribute lookup + +Bug Description: + During lookup of the virtual attribute table (filter evaluation and returned attribute) + the lock is acquired many times in read. 
For example it is acquired for each targetfilter aci and for + each evaluated entry. + Unfortunately RW lock is expensive and appears frequently on pstacks. + The lock exists because the table can be updated but update is very rare (addition of a new service provider). + So it slows down general proceeding for exceptional events. + +Fix Description: + The fix is to acquire/release the read lock at the operation level and set a per-cpu flag, so that later lookup + would just check the flag. + + SSL initialization does internal searches that access the vattr_global_lock + Call of vattr_global_lock_create needs to be called before slapd_do_all_nss_ssl_init. + + Also, 'main' may or may not fork, the initialization fo the thread private variable + is done either on the child or parent depending if main forks or not. + + The leak is fixed using a destructor callback of the private variable and so + call PR_SetThreadPrivate only if there is no private variable. + + This patch is the merge of the four 49873 patches done in master + +https://pagure.io/389-ds-base/issue/49873 + +Reviewed by: Ludwig Krispenz, William Brown , Simon Pichugi (thanks !!) 
+ +Platforms tested: F28 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/slapd/detach.c | 9 ++ + ldap/servers/slapd/opshared.c | 6 ++ + ldap/servers/slapd/proto-slap.h | 5 + + ldap/servers/slapd/vattr.c | 164 ++++++++++++++++++++++++++++---- + 4 files changed, 167 insertions(+), 17 deletions(-) + +diff --git a/ldap/servers/slapd/detach.c b/ldap/servers/slapd/detach.c +index 681e6a701..d5c95a04f 100644 +--- a/ldap/servers/slapd/detach.c ++++ b/ldap/servers/slapd/detach.c +@@ -144,6 +144,10 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t * + } + break; + } ++ /* The thread private counter needs to be allocated after the fork ++ * it is not inherited from parent process ++ */ ++ vattr_global_lock_create(); + + /* call this right after the fork, but before closing stdin */ + if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) { +@@ -174,6 +178,11 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t * + + g_set_detached(1); + } else { /* not detaching - call nss/ssl init */ ++ /* The thread private counter needs to be allocated after the fork ++ * it is not inherited from parent process ++ */ ++ vattr_global_lock_create(); ++ + if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) { + return 1; + } +diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c +index 50b7ae8f6..cf6cdff01 100644 +--- a/ldap/servers/slapd/opshared.c ++++ b/ldap/servers/slapd/opshared.c +@@ -244,6 +244,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + int pr_idx = -1; + Slapi_DN *orig_sdn = NULL; + int free_sdn = 0; ++ PRBool vattr_lock_acquired = PR_FALSE; + + be_list[0] = NULL; + referral_list[0] = NULL; +@@ -511,6 +512,8 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + } + + slapi_pblock_set(pb, SLAPI_BACKEND_COUNT, &index); ++ vattr_rdlock(); ++ vattr_lock_acquired = PR_TRUE; + + if (be) { + slapi_pblock_set(pb, 
SLAPI_BACKEND, be); +@@ -969,6 +972,9 @@ free_and_return: + } else if (be_single) { + slapi_be_Unlock(be_single); + } ++ if (vattr_lock_acquired) { ++ vattr_rd_unlock(); ++ } + + free_and_return_nolock: + slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc); +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 7a429b238..79017e68d 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -1409,6 +1409,11 @@ void subentry_create_filter(Slapi_Filter **filter); + * vattr.c + */ + void vattr_init(void); ++void vattr_global_lock_create(void); ++void vattr_rdlock(); ++void vattr_rd_unlock(); ++void vattr_wrlock(); ++void vattr_wr_unlock(); + void vattr_cleanup(void); + + /* +diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c +index f7c473ab1..852a887ce 100644 +--- a/ldap/servers/slapd/vattr.c ++++ b/ldap/servers/slapd/vattr.c +@@ -102,6 +102,16 @@ int vattr_basic_sp_init(); + + void **statechange_api; + ++struct _vattr_map ++{ ++ Slapi_RWLock *lock; ++ PLHashTable *hashtable; /* Hash table */ ++}; ++typedef struct _vattr_map vattr_map; ++ ++static vattr_map *the_map = NULL; ++static PRUintn thread_private_global_vattr_lock; ++ + /* Housekeeping Functions, called by server startup/shutdown code */ + + /* Called on server startup, init all structures etc */ +@@ -115,7 +125,136 @@ vattr_init() + vattr_basic_sp_init(); + #endif + } ++/* ++ * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_NewThreadPrivateIndex ++ * It is called each time: ++ * - PR_SetThreadPrivate is call with a not NULL private value ++ * - on thread exit ++ */ ++static void ++vattr_global_lock_free(void *ptr) ++{ ++ int *nb_acquired = ptr; ++ if (nb_acquired) { ++ slapi_ch_free((void **)&nb_acquired); ++ } ++} ++/* Create a private variable for each individual thread of the current process */ ++void ++vattr_global_lock_create() ++{ ++ if (PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, 
vattr_global_lock_free) != PR_SUCCESS) { ++ slapi_log_err(SLAPI_LOG_ALERT, ++ "vattr_global_lock_create", "Failure to create global lock for virtual attribute !\n"); ++ PR_ASSERT(0); ++ } ++} ++static int ++global_vattr_lock_get_acquired_count() ++{ ++ int *nb_acquired; ++ nb_acquired = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock); ++ if (nb_acquired == NULL) { ++ /* if it was not initialized set it to zero */ ++ nb_acquired = (int *) slapi_ch_calloc(1, sizeof(int)); ++ PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) nb_acquired); ++ } ++ return *nb_acquired; ++} ++static void ++global_vattr_lock_set_acquired_count(int nb_acquired) ++{ ++ int *val; ++ val = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock); ++ if (val == NULL) { ++ /* if it was not initialized set it to zero */ ++ val = (int *) slapi_ch_calloc(1, sizeof(int)); ++ PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) val); ++ } ++ *val = nb_acquired; ++} ++/* The map lock can be acquired recursively. So only the first rdlock ++ * will acquire the lock. ++ * A optimization acquires it at high level (op_shared_search), so that ++ * later calls during the operation processing will just increase/decrease a counter. ++ */ ++void ++vattr_rdlock() ++{ ++ int nb_acquire = global_vattr_lock_get_acquired_count(); ++ ++ if (nb_acquire == 0) { ++ /* The lock was not held just acquire it */ ++ slapi_rwlock_rdlock(the_map->lock); ++ } ++ nb_acquire++; ++ global_vattr_lock_set_acquired_count(nb_acquire); ++ ++} ++/* The map lock can be acquired recursively. So only the last unlock ++ * will release the lock. ++ * A optimization acquires it at high level (op_shared_search), so that ++ * later calls during the operation processing will just increase/decrease a counter. 
++ */ ++void ++vattr_rd_unlock() ++{ ++ int nb_acquire = global_vattr_lock_get_acquired_count(); + ++ if (nb_acquire >= 1) { ++ nb_acquire--; ++ if (nb_acquire == 0) { ++ slapi_rwlock_unlock(the_map->lock); ++ } ++ global_vattr_lock_set_acquired_count(nb_acquire); ++ } else { ++ /* this is likely the consequence of lock acquire in read during an internal search ++ * but the search callback updated the map and release the readlock and acquired ++ * it in write. ++ * So after the update the lock was no longer held but when completing the internal ++ * search we release the global read lock, that now has nothing to do ++ */ ++ slapi_log_err(SLAPI_LOG_DEBUG, ++ "vattr_rd_unlock", "vattr lock no longer acquired in read.\n"); ++ } ++} ++ ++/* The map lock is acquired in write (updating the map) ++ * It exists a possibility that lock is acquired in write while it is already ++ * hold in read by this thread (internal search with updating callback) ++ * In such situation, the we must abandon the read global lock and acquire in write ++ */ ++void ++vattr_wrlock() ++{ ++ int nb_read_acquire = global_vattr_lock_get_acquired_count(); ++ ++ if (nb_read_acquire) { ++ /* The lock was acquired in read but we need it in write ++ * release it and set the global vattr_lock counter to 0 ++ */ ++ slapi_rwlock_unlock(the_map->lock); ++ global_vattr_lock_set_acquired_count(0); ++ } ++ slapi_rwlock_wrlock(the_map->lock); ++} ++/* The map lock is release from a write write (updating the map) ++ */ ++void ++vattr_wr_unlock() ++{ ++ int nb_read_acquire = global_vattr_lock_get_acquired_count(); ++ ++ if (nb_read_acquire) { ++ /* The lock being acquired in write, the private thread counter ++ * (that count the number of time it was acquired in read) should be 0 ++ */ ++ slapi_log_err(SLAPI_LOG_INFO, ++ "vattr_unlock", "The lock was acquired in write. 
We should not be here\n"); ++ PR_ASSERT(nb_read_acquire == 0); ++ } ++ slapi_rwlock_unlock(the_map->lock); ++} + /* Called on server shutdown, free all structures, inform service providers that we're going down etc */ + void + vattr_cleanup() +@@ -1811,15 +1950,6 @@ typedef struct _vattr_map_entry vattr_map_entry; + + vattr_map_entry test_entry = {NULL}; + +-struct _vattr_map +-{ +- Slapi_RWLock *lock; +- PLHashTable *hashtable; /* Hash table */ +-}; +-typedef struct _vattr_map vattr_map; +- +-static vattr_map *the_map = NULL; +- + static PRIntn + vattr_hash_compare_keys(const void *v1, const void *v2) + { +@@ -1939,11 +2069,11 @@ vattr_map_lookup(const char *type_to_find, vattr_map_entry **result) + } + + /* Get the reader lock */ +- slapi_rwlock_rdlock(the_map->lock); ++ vattr_rdlock(); + *result = (vattr_map_entry *)PL_HashTableLookupConst(the_map->hashtable, + (void *)basetype); + /* Release ze lock */ +- slapi_rwlock_unlock(the_map->lock); ++ vattr_rd_unlock(); + + if (tmp) { + slapi_ch_free_string(&tmp); +@@ -1962,13 +2092,13 @@ vattr_map_insert(vattr_map_entry *vae) + { + PR_ASSERT(the_map); + /* Get the writer lock */ +- slapi_rwlock_wrlock(the_map->lock); ++ vattr_wrlock(); + /* Insert the thing */ + /* It's illegal to call this function if the entry is already there */ + PR_ASSERT(NULL == PL_HashTableLookupConst(the_map->hashtable, (void *)vae->type_name)); + PL_HashTableAdd(the_map->hashtable, (void *)vae->type_name, (void *)vae); + /* Unlock and we're done */ +- slapi_rwlock_unlock(the_map->lock); ++ vattr_wr_unlock(); + return 0; + } + +@@ -2105,13 +2235,13 @@ schema_changed_callback(Slapi_Entry *e __attribute__((unused)), + void *caller_data __attribute__((unused))) + { + /* Get the writer lock */ +- slapi_rwlock_wrlock(the_map->lock); ++ vattr_wrlock(); + + /* go through the list */ + PL_HashTableEnumerateEntries(the_map->hashtable, vattr_map_entry_rebuild_schema, 0); + + /* Unlock and we're done */ +- slapi_rwlock_unlock(the_map->lock); ++ 
vattr_wr_unlock(); + } + + +@@ -2131,7 +2261,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type) + objAttrValue *obj; + + if (0 == vattr_map_lookup(type, &map_entry)) { +- slapi_rwlock_rdlock(the_map->lock); ++ vattr_rdlock(); + + obj = map_entry->objectclasses; + +@@ -2148,7 +2278,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type) + obj = obj->pNext; + } + +- slapi_rwlock_unlock(the_map->lock); ++ vattr_rd_unlock(); + } + + slapi_valueset_free(vs); +-- +2.17.2 + diff --git a/SOURCES/0014-Ticket-49950-PassSync-not-setting-pwdLastSet-attribu.patch b/SOURCES/0014-Ticket-49950-PassSync-not-setting-pwdLastSet-attribu.patch deleted file mode 100644 index 0395971..0000000 --- a/SOURCES/0014-Ticket-49950-PassSync-not-setting-pwdLastSet-attribu.patch +++ /dev/null @@ -1,521 +0,0 @@ -From 0d2456fd9e2678f6db075b224528727b741ff205 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 14 Sep 2018 11:24:35 -0400 -Subject: [PATCH] Ticket 49950 - PassSync not setting pwdLastSet attribute in - Active Directory after Pw update from LDAP sync for normal user - -Bug Description: - -If a user's password was reset by an "Admin" or directory manager, the -password policy requires a user must change their password after it's -been "reset", and the user then resets their password in DS, this -information was not sent to AD. Then if the user logged in AD after -resetting their password in DS they still get forced to change their -password again in AD. - -Fix Description: - -When sending a password update to AD, and AD is enforcing password must -be reset, check if the user's did reset thier password. If so, set the -correct "pwdLastSet" value to prevent AD from forcing that user to -change their password again. - -But this only works going from DS to AD. The information needed to make -it work from AD -> DS is not available to passSync, and if it was available -it could not be correctly sent to DS anyway (not without a major redesign). 
- -Side Note: - -Also moved iand consolidated the function "fetch_attr" to util.c. It -was reused and redefined in many plugins. So I added the definition -to slapi-plugin.h and removed the duplicate definitions. - -https://pagure.io/389-ds-base/issue/49950 - -Reviewed by: tbordaz(Thanks!) - -(cherry picked from commit d9437be2e60fdbd6a5f1364f5887e1a3c89cda68) -(cherry picked from commit ac500d378aa22d5e818b110074ac9cd3e421e38d) ---- - ldap/servers/plugins/automember/automember.c | 20 ----- - ldap/servers/plugins/linkedattrs/fixup_task.c | 20 ----- - ldap/servers/plugins/memberof/memberof.c | 17 ---- - .../plugins/posix-winsync/posix-group-task.c | 18 +--- - .../replication/repl5_replica_config.c | 13 --- - .../replication/windows_protocol_util.c | 90 ++++++++++++++----- - .../plugins/schema_reload/schema_reload.c | 17 ---- - ldap/servers/plugins/syntaxes/validate_task.c | 20 ----- - ldap/servers/slapd/slapi-plugin.h | 2 + - ldap/servers/slapd/task.c | 17 ---- - ldap/servers/slapd/test-plugins/sampletask.c | 16 ---- - ldap/servers/slapd/util.c | 17 ++++ - 12 files changed, 89 insertions(+), 178 deletions(-) - -diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c -index c91aa4e8e..d982d49a3 100644 ---- a/ldap/servers/plugins/automember/automember.c -+++ b/ldap/servers/plugins/automember/automember.c -@@ -74,7 +74,6 @@ static void automember_free_regex_rule(struct automemberRegexRule *rule); - static int automember_parse_grouping_attr(char *value, char **grouping_attr, char **grouping_value); - static int automember_update_membership(struct configEntry *config, Slapi_Entry *e, PRFileDesc *ldif_fd); - static int automember_add_member_value(Slapi_Entry *member_e, const char *group_dn, char *grouping_attr, char *grouping_value, PRFileDesc *ldif_fd); --const char *fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val); - - /* - * task functions -@@ -1927,25 +1926,6 @@ typedef struct _task_data - int 
scope; - } task_data; - --/* -- * extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). -- * you do not need to free anything returned by this. -- */ --const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Value *val = NULL; -- Slapi_Attr *attr; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) { -- return default_val; -- } -- slapi_attr_first_value(attr, &val); -- -- return slapi_value_get_string(val); --} -- - static void - automember_task_destructor(Slapi_Task *task) - { -diff --git a/ldap/servers/plugins/linkedattrs/fixup_task.c b/ldap/servers/plugins/linkedattrs/fixup_task.c -index 900ee1135..4929714b4 100644 ---- a/ldap/servers/plugins/linkedattrs/fixup_task.c -+++ b/ldap/servers/plugins/linkedattrs/fixup_task.c -@@ -22,7 +22,6 @@ static void linked_attrs_fixup_task_thread(void *arg); - static void linked_attrs_fixup_links(struct configEntry *config); - static int linked_attrs_remove_backlinks_callback(Slapi_Entry *e, void *callback_data); - static int linked_attrs_add_backlinks_callback(Slapi_Entry *e, void *callback_data); --static const char *fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val); - - /* - * Function Implementations -@@ -459,22 +458,3 @@ done: - - return rc; - } -- --/* extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). -- * you do not need to free anything returned by this. 
-- */ --static const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) { -- return default_val; -- } -- -- slapi_attr_first_value(attr, &val); -- -- return slapi_value_get_string(val); --} -diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c -index 87313ff19..26236dc68 100644 ---- a/ldap/servers/plugins/memberof/memberof.c -+++ b/ldap/servers/plugins/memberof/memberof.c -@@ -142,7 +142,6 @@ static int memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *con - static int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn, Slapi_DN *replace_with_sdn, Slapi_DN *op_to_sdn, memberofstringll *stack); - static int memberof_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg); - static void memberof_task_destructor(Slapi_Task *task); --static const char *fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val); - static void memberof_fixup_task_thread(void *arg); - static int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td); - static int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data); -@@ -2871,22 +2870,6 @@ done: - "memberof_fixup_task_thread - refcount decremented.\n"); - } - --/* extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). -- * you do not need to free anything returned by this. 
-- */ --const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) -- return default_val; -- slapi_attr_first_value(attr, &val); -- return slapi_value_get_string(val); --} -- - int - memberof_task_add(Slapi_PBlock *pb, - Slapi_Entry *e, -diff --git a/ldap/servers/plugins/posix-winsync/posix-group-task.c b/ldap/servers/plugins/posix-winsync/posix-group-task.c -index b4c507595..d8b6addd4 100644 ---- a/ldap/servers/plugins/posix-winsync/posix-group-task.c -+++ b/ldap/servers/plugins/posix-winsync/posix-group-task.c -@@ -42,22 +42,6 @@ posix_group_fixup_task_thread(void *arg); - static int - posix_group_fix_memberuid_callback(Slapi_Entry *e, void *callback_data); - --/* extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). -- * you do not need to free anything returned by this. 
-- */ --static const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) -- return default_val; -- slapi_attr_first_value(attr, &val); -- return slapi_value_get_string(val); --} -- - /* e configEntry */ - int - posix_group_task_add(Slapi_PBlock *pb __attribute__((unused)), -@@ -82,7 +66,7 @@ posix_group_task_add(Slapi_PBlock *pb __attribute__((unused)), - - /* get arg(s) */ - /* default: set replication basedn */ -- if ((dn = fetch_attr(e, "basedn", slapi_sdn_get_dn(posix_winsync_config_get_suffix()))) == NULL) { -+ if ((dn = fetch_attr(e, "basedn", (char *)slapi_sdn_get_dn(posix_winsync_config_get_suffix()))) == NULL) { - *returncode = LDAP_OBJECT_CLASS_VIOLATION; - rv = SLAPI_DSE_CALLBACK_ERROR; - goto out; -diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c -index ea430d9a4..84e02639b 100644 ---- a/ldap/servers/plugins/replication/repl5_replica_config.c -+++ b/ldap/servers/plugins/replication/repl5_replica_config.c -@@ -1353,19 +1353,6 @@ replica_execute_cleanruv_task(Object *r, ReplicaId rid, char *returntext __attri - return LDAP_SUCCESS; - } - --const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) -- return default_val; -- -- slapi_attr_first_value(attr, &val); -- return slapi_value_get_string(val); --} -- - static int - replica_cleanall_ruv_task(Slapi_PBlock *pb __attribute__((unused)), - Slapi_Entry *e, -diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c -index f350b6d34..f6898d018 100644 ---- a/ldap/servers/plugins/replication/windows_protocol_util.c -+++ b/ldap/servers/plugins/replication/windows_protocol_util.c -@@ 
-720,39 +720,79 @@ send_password_modify(Slapi_DN *sdn, - } else { - Slapi_Attr *attr = NULL; - int force_reset_pw = 0; -+ int pwd_already_reset = 0; -+ int ds_must_change = config_get_pw_must_change(); -+ - /* -- * If AD entry has password must change flag is set, -- * we keep the flag (pwdLastSet == 0). -- * msdn.microsoft.com: Windows Dev Centor - Desktop -- * To force a user to change their password at next logon, -- * set the pwdLastSet attribute to zero (0). -- */ -+ * If AD entry has password must change flag is set, -+ * we keep the flag (pwdLastSet == 0). -+ * msdn.microsoft.com: Windows Dev Centor - Desktop -+ * To force a user to change their password at next logon, -+ * set the pwdLastSet attribute to zero (0). -+ */ - if (remote_entry && - (0 == slapi_entry_attr_find(remote_entry, "pwdLastSet", &attr)) && -- attr) { -+ attr) -+ { - Slapi_Value *v = NULL; - int i = 0; -+ - for (i = slapi_attr_first_value(attr, &v); - v && (i != -1); -- i = slapi_attr_next_value(attr, i, &v)) { -+ i = slapi_attr_next_value(attr, i, &v)) -+ { - const char *s = slapi_value_get_string(v); - if (NULL == s) { - continue; - } - if (0 == strcmp(s, "0")) { -- slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, -- "%s: AD entry %s set \"user must change password at next logon\". ", -- agmt_get_long_name(prp->agmt), slapi_entry_get_dn(remote_entry)); - force_reset_pw = 1; -+ if (ds_must_change) { -+ /* -+ * DS already enforces "password must be changed after reset". -+ * Do an internal search and check the passwordExpirationtime -+ * to see if is it actually needs to be reset. 
If it doesn't, -+ * then set pwdLastSet to -1 -+ */ -+ char *expiration_val; -+ int rc = 0; -+ Slapi_DN *local_sdn = NULL; -+ -+ rc = map_entry_dn_inbound(remote_entry, &local_sdn, prp->agmt); -+ if ((0 == rc) && local_sdn) { -+ Slapi_Entry *local_entry = NULL; -+ /* Get the local entry if it exists */ -+ rc = windows_get_local_entry(local_sdn, &local_entry); -+ if ((0 == rc) && local_entry) { -+ expiration_val = (char *)fetch_attr(local_entry, "passwordExpirationtime", NULL); -+ if (expiration_val && parse_genTime(expiration_val) != NO_TIME){ -+ /* The user did reset their password */ -+ slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, -+ "send_password_modify - entry (%s) password was reset by user send that info to AD\n", -+ slapi_sdn_get_dn(local_sdn)); -+ pwd_already_reset = 1; -+ force_reset_pw = 0; -+ } -+ slapi_entry_free(local_entry); -+ } -+ } -+ slapi_sdn_free(&local_sdn); -+ } else { -+ slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, -+ "%s: AD entry %s set \"user must change password at next logon\n", -+ agmt_get_long_name(prp->agmt), slapi_entry_get_dn(remote_entry));; -+ } - } - } - } -- /* We will attempt to bind to AD with the new password first. We do -- * this to avoid playing a password change that originated from AD -- * back to AD. If we just played the password change back, then -- * both sides would be in sync, but AD would contain the new password -- * twice in it's password history, which undermines the password -- * history policies in AD. */ -+ /* -+ * We will attempt to bind to AD with the new password first. We do -+ * this to avoid playing a password change that originated from AD -+ * back to AD. If we just played the password change back, then -+ * both sides would be in sync, but AD would contain the new password -+ * twice in it's password history, which undermines the password -+ * history policies in AD. 
-+ */ - if (windows_check_user_password(prp->conn, sdn, password)) { - char *quoted_password = NULL; - /* AD wants the password in quotes ! */ -@@ -792,9 +832,18 @@ send_password_modify(Slapi_DN *sdn, - pw_mod.mod_bvalues = bvals; - - pw_mods[0] = &pw_mod; -- if (force_reset_pw) { -- reset_bv.bv_len = 1; -- reset_bv.bv_val = "0"; -+ -+ if (force_reset_pw || pwd_already_reset) { -+ if (force_reset_pw) { -+ reset_bv.bv_val = "0"; -+ reset_bv.bv_len = 1; -+ } else if (pwd_already_reset) { -+ /* Password was reset by the user, there is no -+ * need to make the user change their password -+ * again in AD so set pwdLastSet to -1 */ -+ reset_bv.bv_val = "-1"; -+ reset_bv.bv_len = 2; -+ } - reset_bvals[0] = &reset_bv; - reset_bvals[1] = NULL; - reset_pw_mod.mod_type = "pwdLastSet"; -@@ -807,7 +856,6 @@ send_password_modify(Slapi_DN *sdn, - } - - pw_return = windows_conn_send_modify(prp->conn, slapi_sdn_get_dn(sdn), pw_mods, NULL, NULL); -- - slapi_ch_free((void **)&unicode_password); - } - PR_smprintf_free(quoted_password); -diff --git a/ldap/servers/plugins/schema_reload/schema_reload.c b/ldap/servers/plugins/schema_reload/schema_reload.c -index ee3b00c3c..c2399e5c3 100644 ---- a/ldap/servers/plugins/schema_reload/schema_reload.c -+++ b/ldap/servers/plugins/schema_reload/schema_reload.c -@@ -187,23 +187,6 @@ schemareload_thread(void *arg) - "schemareload_thread <-- refcount decremented.\n"); - } - --/* extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). -- * you do not need to free anything returned by this. 
-- */ --static const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) -- return default_val; -- slapi_attr_first_value(attr, &val); -- -- return slapi_value_get_string(val); --} -- - static void - schemareload_destructor(Slapi_Task *task) - { -diff --git a/ldap/servers/plugins/syntaxes/validate_task.c b/ldap/servers/plugins/syntaxes/validate_task.c -index 2c625ba71..afec9ef7a 100644 ---- a/ldap/servers/plugins/syntaxes/validate_task.c -+++ b/ldap/servers/plugins/syntaxes/validate_task.c -@@ -43,7 +43,6 @@ static int syntax_validate_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entr - static void syntax_validate_task_destructor(Slapi_Task *task); - static void syntax_validate_task_thread(void *arg); - static int syntax_validate_task_callback(Slapi_Entry *e, void *callback_data); --static const char *fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val); - static void syntax_validate_set_plugin_id(void *plugin_id); - static void *syntax_validate_get_plugin_id(void); - -@@ -258,25 +257,6 @@ bail: - return rc; - } - --/* extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). -- * you do not need to free anything returned by this. 
-- */ --static const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) { -- return default_val; -- } -- -- slapi_attr_first_value(attr, &val); -- -- return slapi_value_get_string(val); --} -- - /* - * Plug-in identity management helper functions - */ -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index 4b75654e7..bdad4e59e 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -8294,6 +8294,8 @@ int32_t slapi_atomic_decr_32(int32_t *ptr, int memorder); - */ - uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder); - -+/* helper function */ -+const char * fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val); - - #ifdef __cplusplus - } -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 3f9d5d995..698ee19b9 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -80,7 +80,6 @@ static void destroy_task(time_t when, void *arg); - static int task_modify(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg); - static int task_deny(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg); - static void task_generic_destructor(Slapi_Task *task); --static const char *fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val); - static Slapi_Entry *get_internal_entry(Slapi_PBlock *pb, char *dn); - static void modify_internal_entry(char *dn, LDAPMod **mods); - static void fixup_tombstone_task_destructor(Slapi_Task *task); -@@ -684,22 +683,6 @@ destroy_task(time_t when, void *arg) - slapi_ch_free((void **)&task); - } - --/* extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). 
-- * you do not need to free anything returned by this. -- */ --static const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) -- return default_val; -- slapi_attr_first_value(attr, &val); -- return slapi_value_get_string(val); --} -- - /* supply the pblock, destroy it when you're done */ - static Slapi_Entry * - get_internal_entry(Slapi_PBlock *pb, char *dn) -diff --git a/ldap/servers/slapd/test-plugins/sampletask.c b/ldap/servers/slapd/test-plugins/sampletask.c -index d04f21b3d..22d43dd48 100644 ---- a/ldap/servers/slapd/test-plugins/sampletask.c -+++ b/ldap/servers/slapd/test-plugins/sampletask.c -@@ -116,22 +116,6 @@ task_sampletask_thread(void *arg) - slapi_task_finish(task, rv); - } - --/* extract a single value from the entry (as a string) -- if it's not in the -- * entry, the default will be returned (which can be NULL). -- * you do not need to free anything returned by this. -- */ --static const char * --fetch_attr(Slapi_Entry *e, const char *attrname, const char *default_val) --{ -- Slapi_Attr *attr; -- Slapi_Value *val = NULL; -- -- if (slapi_entry_attr_find(e, attrname, &attr) != 0) -- return default_val; -- slapi_attr_first_value(attr, &val); -- return slapi_value_get_string(val); --} -- - static void - task_sampletask_destructor(Slapi_Task *task) - { -diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c -index cb46efb3d..8563c5d27 100644 ---- a/ldap/servers/slapd/util.c -+++ b/ldap/servers/slapd/util.c -@@ -1579,3 +1579,20 @@ slapi_create_errormsg( - va_end(ap); - } - } -+ -+/* -+ * Extract a single value from an entry (as a string) -- if it's not in the -+ * entry, the default will be returned (which can be NULL). You do not need -+ * to free the returned string value. 
-+ */ -+const char * -+fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val) -+{ -+ Slapi_Attr *attr; -+ Slapi_Value *val = NULL; -+ -+ if (slapi_entry_attr_find(e, attrname, &attr) != 0) -+ return default_val; -+ slapi_attr_first_value(attr, &val); -+ return slapi_value_get_string(val); -+} --- -2.17.2 - diff --git a/SOURCES/0015-Ticket-49915-fix-compiler-warnings.patch b/SOURCES/0015-Ticket-49915-fix-compiler-warnings.patch deleted file mode 100644 index 4357858..0000000 --- a/SOURCES/0015-Ticket-49915-fix-compiler-warnings.patch +++ /dev/null @@ -1,57 +0,0 @@ -From b1cc97c13bbbcaa6a3c217d285283203809fa90b Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Tue, 16 Oct 2018 10:49:29 +0200 -Subject: [PATCH] Ticket 49915 - fix compiler warnings - ---- - ldap/servers/slapd/back-ldbm/idl_new.c | 2 +- - ldap/servers/slapd/back-ldbm/index.c | 6 +++--- - 2 files changed, 4 insertions(+), 4 deletions(-) - -diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c -index 102265c47..81172f590 100644 ---- a/ldap/servers/slapd/back-ldbm/idl_new.c -+++ b/ldap/servers/slapd/back-ldbm/idl_new.c -@@ -379,7 +379,7 @@ idl_new_range_fetch( - bck_info.index = SLAPI_ATTR_PARENTID; - bck_info.key = "0"; - -- if (rc = slapi_back_get_info(be, BACK_INFO_INDEX_KEY, (void **)&bck_info)) { -+ if ((rc = slapi_back_get_info(be, BACK_INFO_INDEX_KEY, (void **)&bck_info))) { - slapi_log_err(SLAPI_LOG_WARNING, "idl_new_range_fetch", "Total update: fail to retrieve suffix entryID, continue assuming it is the first entry\n"); - } - if (bck_info.key_found) { -diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c -index dea6e9a3e..f0b969ff4 100644 ---- a/ldap/servers/slapd/back-ldbm/index.c -+++ b/ldap/servers/slapd/back-ldbm/index.c -@@ -1262,7 +1262,7 @@ set_suffix_key(Slapi_Backend *be, struct _back_info_index_key *info) - /* Start a txn */ - li = (struct ldbminfo *)be->be_database->plg_private; - 
dblayer_txn_init(li, &txn); -- if (rc = dblayer_txn_begin(be, txn.back_txn_txn, &txn)) { -+ if ((rc = dblayer_txn_begin(be, txn.back_txn_txn, &txn))) { - slapi_log_err(SLAPI_LOG_ERR, "set_suffix_key", "Fail to update %s index with %s/%d (key/ID): txn begin fails\n", - info->index, info->key, info->id); - return rc; -@@ -1272,7 +1272,7 @@ set_suffix_key(Slapi_Backend *be, struct _back_info_index_key *info) - sv_key[1] = NULL; - slapi_value_init_string(sv_key[0], info->key); - -- if (rc = index_addordel_values_sv(be, info->index, sv_key, NULL, info->id, BE_INDEX_ADD, &txn)) { -+ if ((rc = index_addordel_values_sv(be, info->index, sv_key, NULL, info->id, BE_INDEX_ADD, &txn))) { - value_done(sv_key[0]); - dblayer_txn_abort(be, &txn); - slapi_log_err(SLAPI_LOG_ERR, "set_suffix_key", "Fail to update %s index with %s/%d (key/ID): index_addordel_values_sv fails\n", -@@ -1281,7 +1281,7 @@ set_suffix_key(Slapi_Backend *be, struct _back_info_index_key *info) - } - - value_done(sv_key[0]); -- if (rc = dblayer_txn_commit(be, &txn)) { -+ if ((rc = dblayer_txn_commit(be, &txn))) { - slapi_log_err(SLAPI_LOG_ERR, "set_suffix_key", "Fail to update %s index with %s/%d (key/ID): commit fails\n", - info->index, info->key, info->id); - return rc; --- -2.17.2 - diff --git a/SOURCES/0015-Ticket-49958-extended-search-fail-to-match-entries.patch b/SOURCES/0015-Ticket-49958-extended-search-fail-to-match-entries.patch new file mode 100644 index 0000000..dcd583f --- /dev/null +++ b/SOURCES/0015-Ticket-49958-extended-search-fail-to-match-entries.patch @@ -0,0 +1,261 @@ +From cfadaa740b34f1bc1b14db253d7a7cb25f7125d8 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Mon, 24 Sep 2018 14:14:16 +0200 +Subject: [PATCH] Ticket 49958: extended search fail to match entries + +Bug Description: + During an extended search, a structure is created for each filter component. + The structure contains the keys generated from the assertion and using the given + matching rule indexer. 
+ Later the keys will be compared (with the MR) with keys generated from the + attribute values of the candidate entries. + The bug is that parsing the assertion, instead of removing the heading spaces + the routine clear the assertion that is empty. So the generated keys is NULL. + +Fix Description: + The fix consists to only remove heading spaces + +https://pagure.io/389-ds-base/issue/49958 + +Reviewed by: Mark Reynolds + +Platforms tested: F27 + +Flag Day: no + +Doc impact: no +--- + .../tests/suites/filter/filter_test.py | 200 ++++++++++++++++++ + ldap/servers/plugins/collation/orfilter.c | 4 +- + 2 files changed, 201 insertions(+), 3 deletions(-) + +diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py +index 280db68a3..4d2dd3055 100644 +--- a/dirsrvtests/tests/suites/filter/filter_test.py ++++ b/dirsrvtests/tests/suites/filter/filter_test.py +@@ -82,6 +82,206 @@ def test_filter_search_original_attrs(topology_st): + + log.info('test_filter_search_original_attrs: PASSED') + ++@pytest.mark.bz1511462 ++def test_filter_scope_one(topology_st): ++ """Test ldapsearch with scope one gives only single entry ++ ++ :id: cf5a6078-bbe6-4d43-ac71-553c45923f91 ++ :setup: Standalone instance ++ :steps: ++ 1. Search cn=Directory Administrators,dc=example,dc=com using ldapsearch with ++ scope one using base as dc=example,dc=com ++ 2. Check that search should return only one entry ++ :expectedresults: ++ 1. This should pass ++ 2. 
This should pass ++ """ ++ ++ parent_dn="dn: dc=example,dc=com" ++ child_dn="dn: cn=Directory Administrators,dc=example,dc=com" ++ ++ log.info('Search user using ldapsearch with scope one') ++ results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL,'cn=Directory Administrators',['cn'] ) ++ log.info(results) ++ ++ log.info('Search should only have one entry') ++ assert len(results) == 1 ++ ++@pytest.mark.ds47313 ++def test_filter_with_attribute_subtype(topology_st): ++ """Adds 2 test entries and Search with ++ filters including subtype and ! ++ ++ :id: 0e69f5f2-6a0a-480e-8282-fbcc50231908 ++ :setup: Standalone instance ++ :steps: ++ 1. Add 2 entries and create 3 filters ++ 2. Search for entry with filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) ++ 3. Search for entry with filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) ++ 4. Search for entry with filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) ++ 5. Delete the added entries ++ :expectedresults: ++ 1. Operation should be successful ++ 2. Search should be successful ++ 3. Search should be successful ++ 4. Search should not be successful ++ 5. 
Delete the added entries ++ """ ++ ++ # bind as directory manager ++ topology_st.standalone.log.info("Bind as %s" % DN_DM) ++ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ++ ++ # enable filter error logging ++ # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] ++ # topology_st.standalone.modify_s(DN_CONFIG, mod) ++ ++ topology_st.standalone.log.info("\n\n######################### ADD ######################\n") ++ ++ # Prepare the entry with cn;fr & cn;en ++ entry_name_fr = '%s fr' % (ENTRY_NAME) ++ entry_name_en = '%s en' % (ENTRY_NAME) ++ entry_name_both = '%s both' % (ENTRY_NAME) ++ entry_dn_both = 'cn=%s, %s' % (entry_name_both, SUFFIX) ++ entry_both = Entry(entry_dn_both) ++ entry_both.setValues('objectclass', 'top', 'person') ++ entry_both.setValues('sn', entry_name_both) ++ entry_both.setValues('cn', entry_name_both) ++ entry_both.setValues('cn;fr', entry_name_fr) ++ entry_both.setValues('cn;en', entry_name_en) ++ ++ # Prepare the entry with one member ++ entry_name_en_only = '%s en only' % (ENTRY_NAME) ++ entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) ++ entry_en_only = Entry(entry_dn_en_only) ++ entry_en_only.setValues('objectclass', 'top', 'person') ++ entry_en_only.setValues('sn', entry_name_en_only) ++ entry_en_only.setValues('cn', entry_name_en_only) ++ entry_en_only.setValues('cn;en', entry_name_en) ++ ++ topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) ++ topology_st.standalone.add_s(entry_both) ++ ++ topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) ++ topology_st.standalone.add_s(entry_en_only) ++ ++ topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n") ++ ++ # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) ++ myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = 
topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 1 ++ assert ensure_str(ents[0].sn) == entry_name_en_only ++ topology_st.standalone.log.info("Found %s" % ents[0].dn) ++ ++ # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) ++ myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 1 ++ assert ensure_str(ents[0].sn) == entry_name_en_only ++ topology_st.standalone.log.info("Found %s" % ents[0].dn) ++ ++ # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) ++ myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 0 ++ topology_st.standalone.log.info("Found none") ++ ++ topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") ++ ++ topology_st.standalone.log.info("Try to delete %s " % entry_dn_both) ++ topology_st.standalone.delete_s(entry_dn_both) ++ ++ topology_st.standalone.log.info("Try to delete %s " % entry_dn_en_only) ++ topology_st.standalone.delete_s(entry_dn_en_only) ++ ++ log.info('Testcase PASSED') ++ ++@pytest.mark.bz1615155 ++def test_extended_search(topology_st): ++ """Test we can search with equality extended matching rule ++ ++ :id: ++ :setup: Standalone instance ++ :steps: ++ 1. Add a test user with 'sn: ext-test-entry' ++ 2. Search '(cn:de:=ext-test-entry)' ++ 3. Search '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' ++ 4. Search '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' ++ 5. Search '(sn:caseExactMatch:=EXT-TEST-ENTRY)' ++ 6. Search '(sn:caseExactMatch:=ext-test-entry)' ++ 7. Search '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' ++ 8. 
Search '(sn:caseExactIA5Match:=ext-test-entry)' ++ :expectedresults: ++ 1. This should pass ++ 2. This should return one entry ++ 3. This should return one entry ++ 4. This should return one entry ++ 5. This should return NO entry ++ 6. This should return one entry ++ 7. This should return NO entry ++ 8. This should return one entry ++ 3. return one entry ++ """ ++ log.info('Running test_filter_escaped...') ++ ++ ATTR_VAL = 'ext-test-entry' ++ USER1_DN = "uid=%s,%s" % (ATTR_VAL, DEFAULT_SUFFIX) ++ ++ try: ++ topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), ++ 'sn': ATTR_VAL.encode(), ++ 'cn': ATTR_VAL.encode(), ++ 'uid': ATTR_VAL.encode()}))) ++ except ldap.LDAPError as e: ++ log.fatal('test_extended_search: Failed to add test user ' + USER1_DN + ': error ' + ++ e.message['desc']) ++ assert False ++ ++ # filter: '(cn:de:=ext-test-entry)' ++ myfilter = '(cn:de:=%s)' % ATTR_VAL ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 1 ++ ++ # filter: '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' ++ myfilter = '(cn:caseIgnoreIA5Match:=%s)' % ATTR_VAL.upper() ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 1 ++ ++ # filter: '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' ++ myfilter = '(cn:caseIgnoreMatch:=%s)' % ATTR_VAL.upper() ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 1 ++ ++ # filter: '(sn:caseExactMatch:=EXT-TEST-ENTRY)' ++ myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL.upper() ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert 
len(ents) == 0 ++ ++ # filter: '(sn:caseExactMatch:=ext-test-entry)' ++ myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 1 ++ ++ # filter: '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' ++ myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL.upper() ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 0 ++ ++ # filter: '(sn:caseExactIA5Match:=ext-test-entry)' ++ myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL ++ topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ++ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) ++ assert len(ents) == 1 + + if __name__ == '__main__': + # Run isolated +diff --git a/ldap/servers/plugins/collation/orfilter.c b/ldap/servers/plugins/collation/orfilter.c +index 7705de9d6..c092d77ca 100644 +--- a/ldap/servers/plugins/collation/orfilter.c ++++ b/ldap/servers/plugins/collation/orfilter.c +@@ -531,10 +531,8 @@ or_filter_create(Slapi_PBlock *pb) + default: + break; + } +- for (; len > 0 && *val != ' '; ++val, --len) ++ for (; len > 0 && *val == ' '; ++val, --len) + ; +- if (len > 0) +- ++val, --len; /* skip the space */ + bv.bv_len = len; + bv.bv_val = (len > 0) ? val : NULL; + } else { /* mrOID does not identify an ordering rule. 
*/ +-- +2.17.2 + diff --git a/SOURCES/0016-Ticket-49915-fix-compiler-warnings-2nd.patch b/SOURCES/0016-Ticket-49915-fix-compiler-warnings-2nd.patch deleted file mode 100644 index ce463a8..0000000 --- a/SOURCES/0016-Ticket-49915-fix-compiler-warnings-2nd.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 309b90041dcaa2ddc1931dddea0827cef0cbb9bf Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Tue, 16 Oct 2018 15:06:38 +0200 -Subject: [PATCH] Ticket 49915 - fix compiler warnings (2nd) - ---- - ldap/servers/slapd/back-ldbm/proto-back-ldbm.h | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -index a3241b078..61c3313c5 100644 ---- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -@@ -305,6 +305,8 @@ int index_buffer_init(size_t size, int flags, void **h); - int index_buffer_flush(void *h, backend *be, DB_TXN *txn, struct attrinfo *a); - int index_buffer_terminate(void *h); - -+int get_suffix_key(Slapi_Backend *be, struct _back_info_index_key *info); -+int set_suffix_key(Slapi_Backend *be, struct _back_info_index_key *info); - char *index_index2prefix(const char *indextype); - void index_free_prefix(char *); - --- -2.17.2 - diff --git a/SOURCES/0016-Ticket-50028-ds-replcheck-y-option-throws-usage-erro.patch b/SOURCES/0016-Ticket-50028-ds-replcheck-y-option-throws-usage-erro.patch new file mode 100644 index 0000000..fa48834 --- /dev/null +++ b/SOURCES/0016-Ticket-50028-ds-replcheck-y-option-throws-usage-erro.patch @@ -0,0 +1,32 @@ +From 24ffb21ba81fcd1272aa3232c5f17cee6c7568ef Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 4 Apr 2019 12:43:13 -0400 +Subject: [PATCH] Ticket 50028 - ds-replcheck -y option throws usage error + +Description: Using the password file option (-y) causes + a usage error to occur. The arg validation + needs to properly check for this option. 
+ +https://pagure.io/389-ds-base/issue/50028 + +Reviewed by: mreynolds(one line commit rule) +--- + ldap/admin/src/scripts/ds-replcheck | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck +index f48240699..e18465dc0 100755 +--- a/ldap/admin/src/scripts/ds-replcheck ++++ b/ldap/admin/src/scripts/ds-replcheck +@@ -1209,7 +1209,7 @@ def main(): + elif (args.mldif is None and + (args.suffix is None or + args.binddn is None or +- (args.bindpw is None and args.prompt is False) or ++ (args.bindpw is None and (args.prompt is False and args.pass_file is None)) or + args.murl is None or + args.rurl is None)): + print("\n-------> Missing required options for online mode!\n") +-- +2.17.2 + diff --git a/SOURCES/0017-Ticket-49618-Increase-cachememsize-and-dncachememsize.patch b/SOURCES/0017-Ticket-49618-Increase-cachememsize-and-dncachememsize.patch deleted file mode 100644 index 7b4ae87..0000000 --- a/SOURCES/0017-Ticket-49618-Increase-cachememsize-and-dncachememsize.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 60c0e7dfb30fbd8100d45e79d11141956f51656f Mon Sep 17 00:00:00 2001 -From: Simon Pichugin -Date: Fri, 14 Dec 2018 17:34:34 +0100 -Subject: [PATCH] Issue 49618 - Increase cachememsize and dncachememsize - variable sizes - -Description: nssapld-cachememsize is reverted to a smaller value -if the cap is reached. Increase it to UINT64. - -https://pagure.io/389-ds-base/issue/49618 - -Reviewed by: tbordaz, mreynolds (Thanks!) 
---- - ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +- - ldap/servers/slapd/back-ldbm/ldbm_instance_config.c | 4 ++-- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c -index 4e1a94341..144c5efc5 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_config.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c -@@ -2118,7 +2118,7 @@ ldbm_config_get(void *arg, config_info *config, char *buf) - break; - case CONFIG_TYPE_SIZE_T: - val = (size_t)config->config_get_fn(arg); -- sprintf(buf, "%lu", (long unsigned int)val); -+ sprintf(buf, "%" PRIu32, (uint32_t)val); - break; - case CONFIG_TYPE_UINT64: - val = (uint64_t)((uintptr_t)config->config_get_fn(arg)); -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c -index 643628c85..5eac1c1df 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c -@@ -366,11 +366,11 @@ ldbm_instance_config_require_index_set(void *arg, - *----------------------------------------------------------------------*/ - static config_info ldbm_instance_config[] = { - {CONFIG_INSTANCE_CACHESIZE, CONFIG_TYPE_LONG, "-1", &ldbm_instance_config_cachesize_get, &ldbm_instance_config_cachesize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, -- {CONFIG_INSTANCE_CACHEMEMSIZE, CONFIG_TYPE_SIZE_T, DEFAULT_CACHE_SIZE_STR, &ldbm_instance_config_cachememsize_get, &ldbm_instance_config_cachememsize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, -+ {CONFIG_INSTANCE_CACHEMEMSIZE, CONFIG_TYPE_UINT64, DEFAULT_CACHE_SIZE_STR, &ldbm_instance_config_cachememsize_get, &ldbm_instance_config_cachememsize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, - {CONFIG_INSTANCE_READONLY, CONFIG_TYPE_ONOFF, "off", &ldbm_instance_config_readonly_get, &ldbm_instance_config_readonly_set, 
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, - {CONFIG_INSTANCE_REQUIRE_INDEX, CONFIG_TYPE_ONOFF, "off", &ldbm_instance_config_require_index_get, &ldbm_instance_config_require_index_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, - {CONFIG_INSTANCE_DIR, CONFIG_TYPE_STRING, NULL, &ldbm_instance_config_instance_dir_get, &ldbm_instance_config_instance_dir_set, CONFIG_FLAG_ALWAYS_SHOW}, -- {CONFIG_INSTANCE_DNCACHEMEMSIZE, CONFIG_TYPE_SIZE_T, DEFAULT_DNCACHE_SIZE_STR, &ldbm_instance_config_dncachememsize_get, &ldbm_instance_config_dncachememsize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, -+ {CONFIG_INSTANCE_DNCACHEMEMSIZE, CONFIG_TYPE_UINT64, DEFAULT_DNCACHE_SIZE_STR, &ldbm_instance_config_dncachememsize_get, &ldbm_instance_config_dncachememsize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, - {NULL, 0, NULL, NULL, NULL, 0}}; - - void --- -2.17.2 - diff --git a/SOURCES/0018-Ticket-49990-Increase-the-default-FD-limits.patch b/SOURCES/0018-Ticket-49990-Increase-the-default-FD-limits.patch new file mode 100644 index 0000000..38643c8 --- /dev/null +++ b/SOURCES/0018-Ticket-49990-Increase-the-default-FD-limits.patch @@ -0,0 +1,270 @@ +From a825c288665556013a51a7efba2e07bc16ee4ee8 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 5 Apr 2019 09:16:02 -0400 +Subject: [PATCH] Ticket 49990 - Increase the default FD limits + +Description: As discussed in the ticket, this fix sets the maxdescriptors + to the maximum allowed by the OS/systemd. If this limit can + not be obtained then we fall back to 8192 as the limit + +https://pagure.io/389-ds-base/issue/49990 + +Reviewed by: tbordaz & firstyear(Thanks!!) 
+ +(cherry picked from commit 8ca142034a051122b78bdaa3a948d3c50d4cca7e) +(cherry picked from commit 2c583a97cffa54a7da9922215ae37156174a37c5) +--- + .../suites/resource_limits/fdlimits_test.py | 63 +++++++++++++++++++ + ldap/servers/slapd/libglobs.c | 26 +++++--- + ldap/servers/slapd/main.c | 5 +- + ldap/servers/slapd/proto-slap.h | 4 +- + ldap/servers/slapd/slap.h | 6 +- + wrappers/systemd.template.service.in | 1 - + wrappers/systemd.template.sysconfig | 3 +- + 7 files changed, 90 insertions(+), 18 deletions(-) + create mode 100644 dirsrvtests/tests/suites/resource_limits/fdlimits_test.py + +diff --git a/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py +new file mode 100644 +index 000000000..e5b14a747 +--- /dev/null ++++ b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py +@@ -0,0 +1,63 @@ ++import logging ++import pytest ++import os ++import ldap ++from lib389._constants import * ++from lib389.topologies import topology_st ++ ++logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++FD_ATTR = "nsslapd-maxdescriptors" ++SYSTEMD_VAL = "16384" ++CUSTOM_VAL = "9000" ++TOO_HIGH_VAL = "65536" ++TOO_LOW_VAL = "0" ++ ++ ++def test_fd_limits(topology_st): ++ """Test the default limits, and custom limits ++ ++ :id: fa0a5106-612f-428f-84c0-9c85c34d0433 ++ :setup: Standalone Instance ++ :steps: ++ 1. Check default limit ++ 2. Change default limit ++ 3. Check invalid/too high limit is rejected ++ 4. Check invalid/too low limit is rejected ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. 
Success ++ 4 Success ++ """ ++ ++ # Check systemd default ++ max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) ++ assert max_fd == SYSTEMD_VAL ++ ++ # Check custom value is applied ++ topology_st.standalone.config.set(FD_ATTR, CUSTOM_VAL) ++ max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) ++ assert max_fd == CUSTOM_VAL ++ ++ # Attempt to use val that is too high ++ with pytest.raises(ldap.UNWILLING_TO_PERFORM): ++ topology_st.standalone.config.set(FD_ATTR, TOO_HIGH_VAL) ++ max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) ++ assert max_fd == CUSTOM_VAL ++ ++ # Attempt to use val that is too low ++ with pytest.raises(ldap.OPERATIONS_ERROR): ++ topology_st.standalone.config.set(FD_ATTR, TOO_LOW_VAL) ++ max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) ++ assert max_fd == CUSTOM_VAL ++ ++ log.info("Test PASSED") ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 59f8d06d5..91c3a4a89 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -131,6 +131,7 @@ + #if defined(LINUX) + #include + #endif ++#include + + #define REMOVE_CHANGELOG_CMD "remove" + +@@ -1465,6 +1466,8 @@ void + FrontendConfig_init(void) + { + slapdFrontendConfig_t *cfg = getFrontendConfig(); ++ struct rlimit rlp; ++ int64_t maxdescriptors = SLAPD_DEFAULT_MAXDESCRIPTORS; + + #if SLAPI_CFG_USE_RWLOCK == 1 + /* initialize the read/write configuration lock */ +@@ -1480,6 +1483,11 @@ FrontendConfig_init(void) + exit(-1); + } + #endif ++ /* Default the maximum fd's to the maximum allowed */ ++ if (getrlimit(RLIMIT_NOFILE, &rlp) == 0) { ++ maxdescriptors = (int64_t)rlp.rlim_max; ++ } ++ + /* Take the lock to make sure we barrier correctly. 
*/ + CFG_LOCK_WRITE(cfg); + +@@ -1514,7 +1522,7 @@ FrontendConfig_init(void) + /* minssf is applied to rootdse, by default */ + init_minssf_exclude_rootdse = cfg->minssf_exclude_rootdse = LDAP_OFF; + cfg->validate_cert = SLAPD_DEFAULT_VALIDATE_CERT; +- cfg->maxdescriptors = SLAPD_DEFAULT_MAXDESCRIPTORS; ++ cfg->maxdescriptors = maxdescriptors; + cfg->groupevalnestlevel = SLAPD_DEFAULT_GROUPEVALNESTLEVEL; + cfg->snmp_index = SLAPD_DEFAULT_SNMP_INDEX; + cfg->SSLclientAuth = SLAPD_DEFAULT_SSLCLIENTAUTH; +@@ -1665,8 +1673,7 @@ FrontendConfig_init(void) + init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_ON; + cfg->ndn_cache_max_size = SLAPD_DEFAULT_NDN_SIZE; + init_sasl_mapping_fallback = cfg->sasl_mapping_fallback = LDAP_OFF; +- init_ignore_vattrs = +- cfg->ignore_vattrs = LDAP_OFF; ++ init_ignore_vattrs = cfg->ignore_vattrs = LDAP_OFF; + cfg->sasl_max_bufsize = SLAPD_DEFAULT_SASL_MAXBUFSIZE; + cfg->unhashed_pw_switch = SLAPD_DEFAULT_UNHASHED_PW_SWITCH; + init_return_orig_type = cfg->return_orig_type = LDAP_OFF; +@@ -4011,13 +4018,12 @@ config_set_maxthreadsperconn(const char *attrname, char *value, char *errorbuf, + return retVal; + } + +-#include +-int ++int32_t + config_set_maxdescriptors(const char *attrname, char *value, char *errorbuf, int apply) + { +- int retVal = LDAP_SUCCESS; +- long nValue = 0; +- int maxVal = 65535; ++ int32_t retVal = LDAP_SUCCESS; ++ int64_t nValue = 0; ++ int64_t maxVal = 65535; + struct rlimit rlp; + char *endp = NULL; + +@@ -5493,11 +5499,11 @@ config_get_maxthreadsperconn() + return slapi_atomic_load_32(&(slapdFrontendConfig->maxthreadsperconn), __ATOMIC_ACQUIRE); + } + +-int ++int64_t + config_get_maxdescriptors(void) + { + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); +- int retVal; ++ int64_t retVal; + + CFG_LOCK_READ(slapdFrontendConfig); + retVal = slapdFrontendConfig->maxdescriptors; +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 219c91294..5e24b3b5f 100644 +--- 
a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -1074,7 +1074,10 @@ main(int argc, char **argv) + slapi_ch_free((void **)&versionstring); + } + +- /* -sduloutre: compute_init() and entry_computed_attr_init() moved up */ ++ /* log the max fd limit as it is typically set in env/systemd */ ++ slapi_log_err(SLAPI_LOG_INFO, "main", ++ "Setting the maximum file descriptor limit to: %ld\n", ++ config_get_maxdescriptors()); + + if (mcfg.slapd_exemode != SLAPD_EXEMODE_REFERRAL) { + int rc; +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 79017e68d..a0648ca3c 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -383,7 +383,7 @@ int config_set_malloc_mxfast(const char *attrname, char *value, char *errorbuf, + int config_set_malloc_trim_threshold(const char *attrname, char *value, char *errorbuf, int apply); + int config_set_malloc_mmap_threshold(const char *attrname, char *value, char *errorbuf, int apply); + #endif +-int config_set_maxdescriptors(const char *attrname, char *value, char *errorbuf, int apply); ++int32_t config_set_maxdescriptors(const char *attrname, char *value, char *errorbuf, int apply); + int config_set_localuser(const char *attrname, char *value, char *errorbuf, int apply); + + int config_set_maxsimplepaged_per_conn(const char *attrname, char *value, char *errorbuf, int apply); +@@ -465,7 +465,7 @@ char *config_get_workingdir(void); + char *config_get_encryptionalias(void); + int32_t config_get_threadnumber(void); + int config_get_maxthreadsperconn(void); +-int config_get_maxdescriptors(void); ++int64_t config_get_maxdescriptors(void); + int config_get_reservedescriptors(void); + int config_get_ioblocktimeout(void); + int config_get_idletimeout(void); +diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h +index 618e245b6..bce720974 100644 +--- a/ldap/servers/slapd/slap.h ++++ b/ldap/servers/slapd/slap.h +@@ -351,8 +351,8 @@ typedef void (*VFPV)(); /* 
takes undefined arguments */ + + #define SLAPD_DEFAULT_PAGEDSIZELIMIT 0 + #define SLAPD_DEFAULT_PAGEDSIZELIMIT_STR "0" +-#define SLAPD_DEFAULT_MAXDESCRIPTORS 1024 +-#define SLAPD_DEFAULT_MAXDESCRIPTORS_STR "1024" ++#define SLAPD_DEFAULT_MAXDESCRIPTORS 8192 ++#define SLAPD_DEFAULT_MAXDESCRIPTORS_STR "8192" + #define SLAPD_DEFAULT_MAX_FILTER_NEST_LEVEL 40 + #define SLAPD_DEFAULT_MAX_FILTER_NEST_LEVEL_STR "40" + #define SLAPD_DEFAULT_GROUPEVALNESTLEVEL 0 +@@ -2254,7 +2254,7 @@ typedef struct _slapdFrontendConfig + int idletimeout; + slapi_int_t ioblocktimeout; + slapi_onoff_t lastmod; +- int maxdescriptors; ++ int64_t maxdescriptors; + int conntablesize; + slapi_int_t maxthreadsperconn; + int outbound_ldap_io_timeout; +diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in +index 0d88900b6..4c1b13d98 100644 +--- a/wrappers/systemd.template.service.in ++++ b/wrappers/systemd.template.service.in +@@ -28,7 +28,6 @@ EnvironmentFile=@initconfigdir@/@package_name@-%i + PIDFile=@localstatedir@/run/@package_name@/slapd-%i.pid + ExecStartPre=@sbindir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif + ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i @localstatedir@/run/@package_name@/slapd-%i.pid +- + # Hardening options: + # PrivateDevices=true + # ProtectSystem=true +diff --git a/wrappers/systemd.template.sysconfig b/wrappers/systemd.template.sysconfig +index 903876b17..76c004d40 100644 +--- a/wrappers/systemd.template.sysconfig ++++ b/wrappers/systemd.template.sysconfig +@@ -7,7 +7,8 @@ + + # This controls the number of file handles avaliable. File handles + # correlate to sockets for the process, and our access to logs and +-# databases. ++# databases. Note, the configuration setting in Directory Server, ++# "nsslapd-maxdescriptors", can override this limit. 
+ LimitNOFILE=16384 + + # You can limit the memory in the cgroup with these, and ns-slapd +-- +2.17.2 + diff --git a/SOURCES/0018-Ticket-50020-during-MODRDN-referential-integrity-can.patch b/SOURCES/0018-Ticket-50020-during-MODRDN-referential-integrity-can.patch deleted file mode 100644 index bd10630..0000000 --- a/SOURCES/0018-Ticket-50020-during-MODRDN-referential-integrity-can.patch +++ /dev/null @@ -1,209 +0,0 @@ -From 70fd6e1fa6667734f39146cef53de6e3ff22d765 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Fri, 9 Nov 2018 17:07:11 +0100 -Subject: [PATCH] Ticket 50020 - during MODRDN referential integrity can fail - erronously while updating large groups - -Bug Description: - During a MODRDN of a group member, referential integrity will update the groups containing this member. - Under specific conditions, the MODRDN can fail (err=1). - - on MODRDN Referential integrity checks if the original DN of the target MODRDN entry is - member of a given group. If it is then it updates the group. - The returned code of the group update is using the variable 'rc'. - It does a normalized DN comparison to compare original DN with members DN, to determine if - a group needs to be updated. - If the group does not need to be updated, 'rc' is not set. - The bug is that it uses 'rc' to normalize the DN and if the group is not updated - the returned code reflects the normalization returned code rather that the group update. 
- - The bug is hit in specific conditions - - One of the evaluated group contains more than 128 members - the last member (last value) of the group is not the moved entry - the last member (last value) of the group is a DN value that contains escaped chars - -Fix Description: - Use a local variable to check the result of the DN normalization - -https://pagure.io/389-ds-base/issue/50020 - -Reviewed by: Simon Pichugin, Mark Reynolds (thanks) - -Platforms tested: F27 - -Flag Day: no ---- - .../tests/suites/plugins/referint_test.py | 103 ++++++++++++++++++ - ldap/servers/plugins/referint/referint.c | 18 +-- - 2 files changed, 113 insertions(+), 8 deletions(-) - create mode 100644 dirsrvtests/tests/suites/plugins/referint_test.py - -diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py -new file mode 100644 -index 000000000..67a11de9e ---- /dev/null -+++ b/dirsrvtests/tests/suites/plugins/referint_test.py -@@ -0,0 +1,103 @@ -+# --- BEGIN COPYRIGHT BLOCK --- -+# Copyright (C) 2016 Red Hat, Inc. -+# All rights reserved. -+# -+# License: GPL (version 3 or any later version). -+# See LICENSE for details. 
-+# --- END COPYRIGHT BLOCK --- -+# -+''' -+Created on Dec 12, 2019 -+ -+@author: tbordaz -+''' -+import logging -+import subprocess -+import pytest -+from lib389 import Entry -+from lib389.utils import * -+from lib389.plugins import * -+from lib389._constants import * -+from lib389.idm.user import UserAccounts, UserAccount -+from lib389.idm.group import Groups -+from lib389.topologies import topology_st as topo -+ -+log = logging.getLogger(__name__) -+ -+ESCAPED_RDN_BASE = "foo\,oo" -+def _user_get_dn(no): -+ uid = '%s%d' % (ESCAPED_RDN_BASE, no) -+ dn = 'uid=%s,%s' % (uid, SUFFIX) -+ return (uid, dn) -+ -+def add_escaped_user(server, no): -+ (uid, dn) = _user_get_dn(no) -+ log.fatal('Adding user (%s): ' % dn) -+ server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], -+ 'uid': [uid], -+ 'sn' : [uid], -+ 'cn' : [uid]}))) -+ return dn -+ -+@pytest.mark.ds50020 -+def test_referential_false_failure(topo): -+ """On MODRDN referential integrity can erronously fail -+ -+ :id: f77aeb80-c4c4-471b-8c1b-4733b714778b -+ :setup: Standalone Instance -+ :steps: -+ 1. Configure the plugin -+ 2. Create a group -+ - 1rst member the one that will be move -+ - more than 128 members -+ - last member is a DN containing escaped char -+ 3. Rename the 1rst member -+ :expectedresults: -+ 1. should succeed -+ 2. should succeed -+ 3. 
should succeed -+ """ -+ -+ inst = topo[0] -+ -+ # stop the plugin, and start it -+ plugin = ReferentialIntegrityPlugin(inst) -+ plugin.disable() -+ plugin.enable() -+ -+ ############################################################################ -+ # Configure plugin -+ ############################################################################ -+ GROUP_CONTAINER = "ou=groups,%s" % DEFAULT_SUFFIX -+ plugin.replace('referint-membership-attr', 'member') -+ plugin.replace('nsslapd-plugincontainerscope', GROUP_CONTAINER) -+ -+ ############################################################################ -+ # Creates a group with members having escaped DN -+ ############################################################################ -+ # Add some users and a group -+ users = UserAccounts(inst, DEFAULT_SUFFIX, None) -+ user1 = users.create_test_user(uid=1001) -+ user2 = users.create_test_user(uid=1002) -+ -+ groups = Groups(inst, GROUP_CONTAINER, None) -+ group = groups.create(properties={'cn': 'group'}) -+ group.add('member', user2.dn) -+ group.add('member', user1.dn) -+ -+ # Add more than 128 members so that referint follows the buggy path -+ for i in range(130): -+ escaped_user = add_escaped_user(inst, i) -+ group.add('member', escaped_user) -+ -+ ############################################################################ -+ # Check that the MODRDN succeeds -+ ########################################################################### -+ # Here we need to restart so that member values are taken in the right order -+ # the last value is the escaped one -+ inst.restart() -+ -+ # Here if the bug is fixed, referential is able to update the member value -+ inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0) -+ -+ -diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c -index f6d1c27a2..9e4e680d3 100644 ---- a/ldap/servers/plugins/referint/referint.c -+++ b/ldap/servers/plugins/referint/referint.c -@@ 
-824,20 +824,21 @@ _update_one_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ - */ - for (nval = slapi_attr_first_value(attr, &v); nval != -1; - nval = slapi_attr_next_value(attr, nval, &v)) { -+ int normalize_rc; - p = NULL; - dnlen = 0; - - /* DN syntax, which should be a string */ - sval = slapi_ch_strdup(slapi_value_get_string(v)); -- rc = slapi_dn_normalize_case_ext(sval, 0, &p, &dnlen); -- if (rc == 0) { /* sval is passed in; not terminated */ -+ normalize_rc = slapi_dn_normalize_case_ext(sval, 0, &p, &dnlen); -+ if (normalize_rc == 0) { /* sval is passed in; not terminated */ - *(p + dnlen) = '\0'; - sval = p; -- } else if (rc > 0) { -+ } else if (normalize_rc > 0) { - slapi_ch_free_string(&sval); - sval = p; - } -- /* else: (rc < 0) Ignore the DN normalization error for now. */ -+ /* else: (normalize_rc < 0) Ignore the DN normalization error for now. */ - - p = PL_strstr(sval, slapi_sdn_get_ndn(origDN)); - if (p == sval) { -@@ -1013,20 +1014,21 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */ - for (nval = slapi_attr_first_value(attr, &v); - nval != -1; - nval = slapi_attr_next_value(attr, nval, &v)) { -+ int normalize_rc; - p = NULL; - dnlen = 0; - - /* DN syntax, which should be a string */ - sval = slapi_ch_strdup(slapi_value_get_string(v)); -- rc = slapi_dn_normalize_case_ext(sval, 0, &p, &dnlen); -- if (rc == 0) { /* sval is passed in; not terminated */ -+ normalize_rc = slapi_dn_normalize_case_ext(sval, 0, &p, &dnlen); -+ if (normalize_rc == 0) { /* sval is passed in; not terminated */ - *(p + dnlen) = '\0'; - sval = p; -- } else if (rc > 0) { -+ } else if (normalize_rc > 0) { - slapi_ch_free_string(&sval); - sval = p; - } -- /* else: (rc < 0) Ignore the DN normalization error for now. */ -+ /* else: normalize_rc < 0) Ignore the DN normalization error for now. 
*/ - - p = PL_strstr(sval, slapi_sdn_get_ndn(origDN)); - if (p == sval) { --- -2.17.2 - diff --git a/SOURCES/0019-Ticket-49543-fix-certmap-dn-comparison.patch b/SOURCES/0019-Ticket-49543-fix-certmap-dn-comparison.patch deleted file mode 100644 index 3ee748e..0000000 --- a/SOURCES/0019-Ticket-49543-fix-certmap-dn-comparison.patch +++ /dev/null @@ -1,435 +0,0 @@ -From 66c96b915dd4a82ebd4228cba61d7c4bae96cbca Mon Sep 17 00:00:00 2001 -From: Fraser Tweedale -Date: Fri, 16 Mar 2018 15:16:56 +1000 -Subject: [PATCH] Ticket 49543 - fix certmap dn comparison - -Bug Description: Differences in DN string representations between -the value included in certmap.conf, and the stringified value of the -Issuer DN produced by NSS, as well as buggy DN normalisation code in -389 itself, cause 389 to wrongly reject the correct certmap -configuration to use. Authentication fails. This behaviour was -observed when there is an escaped comma in an attribute value. - -Fix Description: Instead of comparing stringified DNs, parse the DN -represented in certmap.conf into an NSS CertNAME. Use the NSS DN -comparison routine when comparing certificate Issuer DNs against the -certmap configurations. Remove the buggy DN normalisation routine. - -https://pagure.io/389-ds-base/issue/49543 - -Author: Fraser Tweedale - -Review by: ??? ---- - include/ldaputil/certmap.h | 20 +++-- - include/ldaputil/ldaputil.h | 2 +- - lib/ldaputil/cert.c | 27 ++++-- - lib/ldaputil/certmap.c | 162 ++++++----------------------------- - lib/ldaputil/examples/init.c | 3 +- - 5 files changed, 62 insertions(+), 152 deletions(-) - -diff --git a/include/ldaputil/certmap.h b/include/ldaputil/certmap.h -index fec2dd931..50fd4d158 100644 ---- a/include/ldaputil/certmap.h -+++ b/include/ldaputil/certmap.h -@@ -16,6 +16,7 @@ - /* What was extcmap.h begins ... 
*/ - - #include -+#include - - #ifndef NSAPI_PUBLIC - #define NSAPI_PUBLIC -@@ -156,7 +157,7 @@ typedef int (*CertVerifyFn_t)(void *cert, LDAP *ld, void *certmap_info, LDAPMess - * otherwise return LDAPU_CERT_MAP_INITFN_FAILED. The server startup will be - * aborted if the return value is not LDAPU_SUCCESS. - */ --typedef int (*CertMapInitFn_t)(void *certmap_info, const char *issuerName, const char *issuerDN, const char *libname); -+typedef int (*CertMapInitFn_t)(void *certmap_info, const char *issuerName, const CERTName *issuerDN, const char *libname); - - /* - * Refer to the description of the function ldapu_get_cert_ava_val -@@ -209,27 +210,30 @@ extern "C" { - - NSAPI_PUBLIC int ldapu_cert_to_ldap_entry(void *cert, LDAP *ld, const char *basedn, LDAPMessage **res); - --NSAPI_PUBLIC int ldapu_set_cert_mapfn(const char *issuerDN, -+NSAPI_PUBLIC int ldapu_set_cert_mapfn(const CERTName *issuerDN, - CertMapFn_t mapfn); - - --NSAPI_PUBLIC CertMapFn_t ldapu_get_cert_mapfn(const char *issuerDN); -+NSAPI_PUBLIC CertMapFn_t ldapu_get_cert_mapfn(const CERTName *issuerDN); - --NSAPI_PUBLIC int ldapu_set_cert_searchfn(const char *issuerDN, -+NSAPI_PUBLIC int ldapu_set_cert_searchfn(const CERTName *issuerDN, - CertSearchFn_t searchfn); - - --NSAPI_PUBLIC CertSearchFn_t ldapu_get_cert_searchfn(const char *issuerDN); -+NSAPI_PUBLIC CertSearchFn_t ldapu_get_cert_searchfn(const CERTName *issuerDN); - --NSAPI_PUBLIC int ldapu_set_cert_verifyfn(const char *issuerDN, -+NSAPI_PUBLIC int ldapu_set_cert_verifyfn(const CERTName *issuerDN, - CertVerifyFn_t verifyFn); - --NSAPI_PUBLIC CertVerifyFn_t ldapu_get_cert_verifyfn(const char *issuerDN); -+NSAPI_PUBLIC CertVerifyFn_t ldapu_get_cert_verifyfn(const CERTName *issuerDN); - - - NSAPI_PUBLIC int ldapu_get_cert_subject_dn(void *cert, char **subjectDN); - - -+NSAPI_PUBLIC CERTName *ldapu_get_cert_issuer_dn_as_CERTName(CERTCertificate *cert); -+ -+ - NSAPI_PUBLIC int ldapu_get_cert_issuer_dn(void *cert, char **issuerDN); - - -@@ -242,7 
+246,7 @@ NSAPI_PUBLIC int ldapu_free_cert_ava_val(char **val); - NSAPI_PUBLIC int ldapu_get_cert_der(void *cert, unsigned char **derCert, unsigned int *len); - - --NSAPI_PUBLIC int ldapu_issuer_certinfo(const char *issuerDN, -+NSAPI_PUBLIC int ldapu_issuer_certinfo(const CERTName *issuerDN, - void **certmap_info); - - -diff --git a/include/ldaputil/ldaputil.h b/include/ldaputil/ldaputil.h -index e0e028c5c..b172819b0 100644 ---- a/include/ldaputil/ldaputil.h -+++ b/include/ldaputil/ldaputil.h -@@ -48,7 +48,7 @@ enum - typedef struct - { - char *issuerName; /* issuer (symbolic/short) name */ -- char *issuerDN; /* cert issuer's DN */ -+ CERTName *issuerDN; /* cert issuer's DN */ - LDAPUPropValList_t *propval; /* pointer to the prop-val pairs list */ - CertMapFn_t mapfn; /* cert to ldapdn & filter mapping func */ - CertVerifyFn_t verifyfn; /* verify cert function */ -diff --git a/lib/ldaputil/cert.c b/lib/ldaputil/cert.c -index 65a481541..73abba12a 100644 ---- a/lib/ldaputil/cert.c -+++ b/lib/ldaputil/cert.c -@@ -54,15 +54,30 @@ ldapu_get_cert_subject_dn(void *cert_in, char **subjectDN) - return *subjectDN ? LDAPU_SUCCESS : LDAPU_ERR_EXTRACT_SUBJECTDN_FAILED; - } - -+/* -+ * Return the Issuer DN as a CERTName. -+ * The CERTName is owned by the CERTCertificate. -+ */ -+NSAPI_PUBLIC CERTName * -+ldapu_get_cert_issuer_dn_as_CERTName(CERTCertificate *cert_in) -+{ -+ return &cert_in->issuer; -+} -+ -+/* -+ * Return the Issuer DN as a string. -+ * The string should be freed by the caller. 
-+ */ - NSAPI_PUBLIC int - ldapu_get_cert_issuer_dn(void *cert_in, char **issuerDN) - { -- CERTCertificate *cert = (CERTCertificate *)cert_in; -- char *cert_issuer = CERT_NameToAscii(&cert->issuer); -- -- *issuerDN = strdup(cert_issuer); -- PR_Free(cert_issuer); -- -+ *issuerDN = NULL; -+ CERTName *dn = ldapu_get_cert_issuer_dn_as_CERTName((CERTCertificate *)cert_in); -+ if (dn != NULL) { -+ char *cert_issuer = CERT_NameToAscii(dn); -+ *issuerDN = strdup(cert_issuer); -+ PR_Free(cert_issuer); -+ } - return *issuerDN ? LDAPU_SUCCESS : LDAPU_ERR_EXTRACT_ISSUERDN_FAILED; - } - -diff --git a/lib/ldaputil/certmap.c b/lib/ldaputil/certmap.c -index 78bb3635b..0db2de12b 100644 ---- a/lib/ldaputil/certmap.c -+++ b/lib/ldaputil/certmap.c -@@ -52,7 +52,6 @@ static char this_dllname[256]; - static const char *LIB_DIRECTIVE = "certmap"; - static const int LIB_DIRECTIVE_LEN = 7; /* strlen("LIB_DIRECTIVE") */ - --static char *ldapu_dn_normalize(char *dn); - static void *ldapu_propval_free(void *propval_in, void *arg); - - typedef struct -@@ -337,8 +336,13 @@ dbinfo_to_certinfo(DBConfDBInfo_t *db_info, - certinfo->issuerName = db_info->dbname; - db_info->dbname = 0; - -- certinfo->issuerDN = ldapu_dn_normalize(db_info->url); -- db_info->url = 0; -+ /* Parse the Issuer DN. 
*/ -+ certinfo->issuerDN = CERT_AsciiToName(db_info->url); -+ if (NULL == certinfo->issuerDN /* invalid DN */ -+ && ldapu_strcasecmp(db_info->url, "default") != 0 /* not "default" */) { -+ rv = LDAPU_ERR_MALFORMED_SUBJECT_DN; -+ goto error; -+ } - - /* hijack actual prop-vals from dbinfo -- to avoid strdup calls */ - if (db_info->firstprop) { -@@ -890,24 +894,26 @@ ldapu_cert_searchfn_default(void *cert, LDAP *ld, void *certmap_info_in, const c - } - - NSAPI_PUBLIC int --ldapu_issuer_certinfo(const char *issuerDN, void **certmap_info) -+ldapu_issuer_certinfo(const CERTName *issuerDN, void **certmap_info) - { - *certmap_info = 0; - -- if (!issuerDN || !*issuerDN || !ldapu_strcasecmp(issuerDN, "default")) { -- *certmap_info = default_certmap_info; -- } else if (certmap_listinfo) { -- char *n_issuerDN = ldapu_dn_normalize(ldapu_strdup(issuerDN)); -+ if (certmap_listinfo) { - LDAPUListNode_t *cur = certmap_listinfo->head; - while (cur) { -- if (!ldapu_strcasecmp(n_issuerDN, ((LDAPUCertMapInfo_t *)cur->info)->issuerDN)) { -+ LDAPUCertMapInfo_t *info = (LDAPUCertMapInfo_t *)cur->info; -+ -+ if (NULL == info->issuerDN) { -+ /* no DN to compare to (probably the default certmap info) */ -+ continue; -+ } -+ -+ if (CERT_CompareName(issuerDN, info->issuerDN) == SECEqual) { - *certmap_info = cur->info; - break; - } - cur = cur->next; - } -- if (n_issuerDN) -- ldapu_free(n_issuerDN); - } - return *certmap_info ? 
LDAPU_SUCCESS : LDAPU_FAILED; - } -@@ -1128,7 +1134,7 @@ ldapu_cert_mapfn_default(void *cert_in, LDAP *ld __attribute__((unused)), void * - } - - NSAPI_PUBLIC int --ldapu_set_cert_mapfn(const char *issuerDN, -+ldapu_set_cert_mapfn(const CERTName *issuerDN, - CertMapFn_t mapfn) - { - LDAPUCertMapInfo_t *certmap_info; -@@ -1161,7 +1167,7 @@ ldapu_get_cert_mapfn_sub(LDAPUCertMapInfo_t *certmap_info) - } - - NSAPI_PUBLIC CertMapFn_t --ldapu_get_cert_mapfn(const char *issuerDN) -+ldapu_get_cert_mapfn(const CERTName *issuerDN) - { - LDAPUCertMapInfo_t *certmap_info = 0; - -@@ -1173,7 +1179,7 @@ ldapu_get_cert_mapfn(const char *issuerDN) - } - - NSAPI_PUBLIC int --ldapu_set_cert_searchfn(const char *issuerDN, -+ldapu_set_cert_searchfn(const CERTName *issuerDN, - CertSearchFn_t searchfn) - { - LDAPUCertMapInfo_t *certmap_info; -@@ -1206,7 +1212,7 @@ ldapu_get_cert_searchfn_sub(LDAPUCertMapInfo_t *certmap_info) - } - - NSAPI_PUBLIC CertSearchFn_t --ldapu_get_cert_searchfn(const char *issuerDN) -+ldapu_get_cert_searchfn(const CERTName *issuerDN) - { - LDAPUCertMapInfo_t *certmap_info = 0; - -@@ -1218,7 +1224,7 @@ ldapu_get_cert_searchfn(const char *issuerDN) - } - - NSAPI_PUBLIC int --ldapu_set_cert_verifyfn(const char *issuerDN, -+ldapu_set_cert_verifyfn(const CERTName *issuerDN, - CertVerifyFn_t verifyfn) - { - LDAPUCertMapInfo_t *certmap_info; -@@ -1251,7 +1257,7 @@ ldapu_get_cert_verifyfn_sub(LDAPUCertMapInfo_t *certmap_info) - } - - NSAPI_PUBLIC CertVerifyFn_t --ldapu_get_cert_verifyfn(const char *issuerDN) -+ldapu_get_cert_verifyfn(const CERTName *issuerDN) - { - LDAPUCertMapInfo_t *certmap_info = 0; - -@@ -1288,7 +1294,6 @@ static int ldapu_certinfo_copy (const LDAPUCertMapInfo_t *from, - NSAPI_PUBLIC int - ldapu_cert_to_ldap_entry(void *cert, LDAP *ld, const char *basedn, LDAPMessage **res) - { -- char *issuerDN = 0; - char *ldapDN = 0; - char *filter = 0; - LDAPUCertMapInfo_t *certmap_info; -@@ -1308,14 +1313,14 @@ ldapu_cert_to_ldap_entry(void *cert, LDAP *ld, 
const char *basedn, LDAPMessage * - certmap_attrs[3] = 0; - } - -- rv = ldapu_get_cert_issuer_dn(cert, &issuerDN); -+ CERTName *issuerDN = ldapu_get_cert_issuer_dn_as_CERTName(cert); -+ /* ^ don't need to free this; it will be freed with ^ the cert */ - -- if (rv != LDAPU_SUCCESS) -+ if (NULL == issuerDN) - return LDAPU_ERR_NO_ISSUERDN_IN_CERT; - - /* don't free the certmap_info -- its a pointer to an internal structure */ - rv = ldapu_issuer_certinfo(issuerDN, (void **)&certmap_info); -- free(issuerDN); - - if (!certmap_info) - certmap_info = default_certmap_info; -@@ -1604,118 +1609,3 @@ ldapu_realloc(void *ptr, int size) - { - return realloc(ptr, size); - } -- --#define DNSEPARATOR(c) (c == ',' || c == ';') --#define SEPARATOR(c) (c == ',' || c == ';' || c == '+') --#define SPACE(c) (c == ' ' || c == '\n') --#define NEEDSESCAPE(c) (c == '\\' || c == '"') --#define B4TYPE 0 --#define INTYPE 1 --#define B4EQUAL 2 --#define B4VALUE 3 --#define INVALUE 4 --#define INQUOTEDVALUE 5 --#define B4SEPARATOR 6 -- --static char * --ldapu_dn_normalize(char *dn) --{ -- char *d, *s; -- int state, gotesc; -- -- gotesc = 0; -- state = B4TYPE; -- for (d = s = dn; *s; s++) { -- switch (state) { -- case B4TYPE: -- if (!SPACE(*s)) { -- state = INTYPE; -- *d++ = *s; -- } -- break; -- case INTYPE: -- if (*s == '=') { -- state = B4VALUE; -- *d++ = *s; -- } else if (SPACE(*s)) { -- state = B4EQUAL; -- } else { -- *d++ = *s; -- } -- break; -- case B4EQUAL: -- if (*s == '=') { -- state = B4VALUE; -- *d++ = *s; -- } else if (!SPACE(*s)) { -- /* not a valid dn - but what can we do here? 
*/ -- *d++ = *s; -- } -- break; -- case B4VALUE: -- if (*s == '"') { -- state = INQUOTEDVALUE; -- *d++ = *s; -- } else if (!SPACE(*s)) { -- state = INVALUE; -- *d++ = *s; -- } -- break; -- case INVALUE: -- if (!gotesc && SEPARATOR(*s)) { -- while (SPACE(*(d - 1))) -- d--; -- state = B4TYPE; -- if (*s == '+') { -- *d++ = *s; -- } else { -- *d++ = ','; -- } -- } else if (gotesc && !NEEDSESCAPE(*s) && -- !SEPARATOR(*s)) { -- *--d = *s; -- d++; -- } else { -- *d++ = *s; -- } -- break; -- case INQUOTEDVALUE: -- if (!gotesc && *s == '"') { -- state = B4SEPARATOR; -- *d++ = *s; -- } else if (gotesc && !NEEDSESCAPE(*s)) { -- *--d = *s; -- d++; -- } else { -- *d++ = *s; -- } -- break; -- case B4SEPARATOR: -- if (SEPARATOR(*s)) { -- state = B4TYPE; -- if (*s == '+') { -- *d++ = *s; -- } else { -- *d++ = ','; -- } -- } -- break; -- default: -- break; -- } -- if (*s == '\\') { -- gotesc = 1; -- } else { -- gotesc = 0; -- } -- } -- *d = '\0'; -- -- /* Trim trailing spaces */ -- d--; -- while (d >= dn && *d == ' ') { -- *d-- = '\0'; -- } -- -- return (dn); --} -diff --git a/lib/ldaputil/examples/init.c b/lib/ldaputil/examples/init.c -index 74db9775c..fd1edc97e 100644 ---- a/lib/ldaputil/examples/init.c -+++ b/lib/ldaputil/examples/init.c -@@ -15,12 +15,13 @@ - #include - #include - #include -+#include - #include "certmap.h" /* Public Certmap API */ - #include "plugin.h" /* must define extern "C" functions */ - - - NSAPI_PUBLIC int --plugin_init_fn(void *certmap_info, const char *issuerName, const char *issuerDN, const char *libname) -+plugin_init_fn(void *certmap_info, const char *issuerName, const CERTName *issuerDN, const char *libname) - { - static int initialized = 0; - int rv; --- -2.17.2 - diff --git a/SOURCES/0019-Ticket-50053-Subtree-password-policy-overrides-a-use.patch b/SOURCES/0019-Ticket-50053-Subtree-password-policy-overrides-a-use.patch new file mode 100644 index 0000000..af6af73 --- /dev/null +++ 
b/SOURCES/0019-Ticket-50053-Subtree-password-policy-overrides-a-use.patch @@ -0,0 +1,44 @@ +From f6e1acdefc97aa0f808b622c6f6a3dc0a683d457 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 15 Apr 2019 11:23:46 -0400 +Subject: [PATCH] Ticket 50053 - Subtree password policy overrides a + user-defined password policy + +Bug Description: + +When an entry contains an attribute that is also defined by a cos definition +a specifier defines which values win: the real values that are in the entry or the +virtual values that are cos defined. +The specifier 'default' means that the real values are the winners (returned). +'operational-default' has the same behavior but just specify that the attribute +is operational. +The bug is that when real values exists, the 'operational-default' specifier +drops the real values in favor of the virtual ones. + +Fix Description: + +Change the test, so that real values are not kept for 'operation-default' +Note: the full routine cos_cache_query_attr looks quite messy and error prone +It would be nice to rewrite it when we have time + +https://pagure.io/389-ds-base/issue/50053 +--- + ldap/servers/plugins/cos/cos_cache.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c +index 5e0cf1725..64c0441c4 100644 +--- a/ldap/servers/plugins/cos/cos_cache.c ++++ b/ldap/servers/plugins/cos/cos_cache.c +@@ -2279,7 +2279,7 @@ cos_cache_query_attr(cos_cache *ptheCache, vattr_context *context, Slapi_Entry * + /* now for the tests */ + + /* would we be allowed to supply this attribute if we had one? 
*/ +- if (entry_has_value && !pAttr->attr_override && !pAttr->attr_operational && !pAttr->attr_operational_default) { ++ if (entry_has_value && !pAttr->attr_override && !pAttr->attr_operational) { + /* answer: no, move on to the next attribute */ + attr_index++; + continue; +-- +2.17.2 + diff --git a/SOURCES/0020-Ticket-49866-Add-passwordSendExpiringTime-to-objectc.patch b/SOURCES/0020-Ticket-49866-Add-passwordSendExpiringTime-to-objectc.patch new file mode 100644 index 0000000..fca53c5 --- /dev/null +++ b/SOURCES/0020-Ticket-49866-Add-passwordSendExpiringTime-to-objectc.patch @@ -0,0 +1,30 @@ +From 97eff45e36a6fe0cc11172b435d8c90908da81e9 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 10 May 2019 10:55:28 -0400 +Subject: [PATCH] Ticket 49866 - Add passwordSendExpiringTime to objectclass + +Description: add the expringtime attribute to the passwordPolicy objectclass + +https://pagure.io/389-ds-base/issue/49866 + +Reviewed by: mreynolds(one line commit rule) +--- + ldap/schema/02common.ldif | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif +index 70d64c0d2..b44e860b3 100644 +--- a/ldap/schema/02common.ldif ++++ b/ldap/schema/02common.ldif +@@ -142,7 +142,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.7 NAME 'nsLicenseUser' DESC 'Netscape def + objectClasses: ( 2.16.840.1.113730.3.2.1 NAME 'changeLogEntry' DESC 'LDAP changelog objectclass' SUP top MUST ( targetdn $ changeTime $ changenumber $ changeType ) MAY ( changes $ newrdn $ deleteoldrdn $ newsuperior ) X-ORIGIN 'Changelog Internet Draft' ) + objectClasses: ( 2.16.840.1.113730.3.2.6 NAME 'referral' DESC 'LDAP referrals objectclass' SUP top MAY ( ref ) X-ORIGIN 'LDAPv3 referrals Internet Draft' ) + objectClasses: ( 2.16.840.1.113730.3.2.12 NAME 'passwordObject' DESC 'Netscape defined password policy objectclass' SUP top MAY ( pwdpolicysubentry $ passwordExpirationTime $ passwordExpWarned $ passwordRetryCount $ retryCountResetTime $ 
accountUnlockTime $ passwordHistory $ passwordAllowChangeTime $ passwordGraceUserTime ) X-ORIGIN 'Netscape Directory Server' ) +-objectClasses: ( 2.16.840.1.113730.3.2.13 NAME 'passwordPolicy' DESC 'Netscape defined password policy objectclass' SUP top MAY ( passwordMaxAge $ passwordExp $ passwordMinLength $ passwordKeepHistory $ passwordInHistory $ passwordChange $ passwordWarning $ passwordLockout $ passwordMaxFailure $ passwordResetDuration $ passwordUnlock $ passwordLockoutDuration $ passwordCheckSyntax $ passwordMustChange $ passwordStorageScheme $ passwordMinAge $ passwordResetFailureCount $ passwordGraceLimit $ passwordMinDigits $ passwordMinAlphas $ passwordMinUppers $ passwordMinLowers $ passwordMinSpecials $ passwordMin8bit $ passwordMaxRepeats $ passwordMinCategories $ passwordMinTokenLength $ passwordTrackUpdateTime $ passwordAdminDN ) X-ORIGIN 'Netscape Directory Server' ) ++objectClasses: ( 2.16.840.1.113730.3.2.13 NAME 'passwordPolicy' DESC 'Netscape defined password policy objectclass' SUP top MAY ( passwordMaxAge $ passwordExp $ passwordMinLength $ passwordKeepHistory $ passwordInHistory $ passwordChange $ passwordWarning $ passwordLockout $ passwordMaxFailure $ passwordResetDuration $ passwordUnlock $ passwordLockoutDuration $ passwordCheckSyntax $ passwordMustChange $ passwordStorageScheme $ passwordMinAge $ passwordResetFailureCount $ passwordGraceLimit $ passwordMinDigits $ passwordMinAlphas $ passwordMinUppers $ passwordMinLowers $ passwordMinSpecials $ passwordMin8bit $ passwordMaxRepeats $ passwordMinCategories $ passwordMinTokenLength $ passwordTrackUpdateTime $ passwordAdminDN $ passwordSendExpiringTime ) X-ORIGIN 'Netscape Directory Server' ) + objectClasses: ( 2.16.840.1.113730.3.2.30 NAME 'glue' DESC 'Netscape defined objectclass' SUP top X-ORIGIN 'Netscape Directory Server' ) + objectClasses: ( 2.16.840.1.113730.3.2.32 NAME 'netscapeMachineData' DESC 'Netscape defined objectclass' SUP top X-ORIGIN 'Netscape Directory Server' ) + 
objectClasses: ( 2.16.840.1.113730.3.2.38 NAME 'vlvSearch' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ vlvBase $ vlvScope $ vlvFilter ) MAY ( multiLineDescription ) X-ORIGIN 'Netscape Directory Server' ) +-- +2.17.2 + diff --git a/SOURCES/0020-Ticket-50117-after-certain-failed-import-operation-i.patch b/SOURCES/0020-Ticket-50117-after-certain-failed-import-operation-i.patch deleted file mode 100644 index a178a31..0000000 --- a/SOURCES/0020-Ticket-50117-after-certain-failed-import-operation-i.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 90ba52ac8f655cb5d6cfd3f201c15f8004eb8414 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Fri, 4 Jan 2019 12:24:56 +0100 -Subject: [PATCH] Ticket 50117 - after certain failed import operation, - impossible to replay an import operation - -Bug Description: - At the beginning of an import, a flag is set to mark the target backend is busy. - Then import tests if there are pending operations. If such operations exist the import can not proceed and fails. - The problem is that in such case of pending operations, the import fails without resetting the busy flag. - It let the backend busy (until next reboot) and prevent new import. 
- -Fix Description: - It needs to reset the busy flag if there are pending operations - -https://pagure.io/389-ds-base/issue/50117 - -Reviewed by: Mark Reynolds, William Brown - -Platforms tested: F27 - -Flag Day: no - -Doc impact: no - -(cherry picked from commit ff00b07402747aac403478a157adab75e306d7d1) -(cherry picked from commit 630940ec119a90c3bbfc7cd3464eb02ab779b474) ---- - ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 14 ++++++++++++-- - 1 file changed, 12 insertions(+), 2 deletions(-) - -diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -index 16b87ee6b..69a2af9cf 100644 ---- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -@@ -704,12 +704,22 @@ ldbm_back_ldif2ldbm(Slapi_PBlock *pb) - } - - /* check if an import/restore is already ongoing... */ -- if ((instance_set_busy(inst) != 0) || -- (slapi_counter_get_value(inst->inst_ref_count) > 0)) { -+ if ((instance_set_busy(inst) != 0)) { - slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_ldif2ldbm", "ldbm: '%s' is already in the middle of " - "another task and cannot be disturbed.\n", - inst->inst_name); - return -1; -+ } else { -+ uint64_t refcnt; -+ refcnt = slapi_counter_get_value(inst->inst_ref_count); -+ if (refcnt > 0) { -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_ldif2ldbm", "ldbm: '%s' there are %d pending operation(s)." 
-+ " Import can not proceed until they are completed.\n", -+ inst->inst_name, -+ refcnt); -+ instance_set_not_busy(inst); -+ return -1; -+ } - } - - if ((task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE)) { --- -2.17.2 - diff --git a/SOURCES/0021-Ticket-49540-Fix-compiler-warning-in-ldif2ldbm.patch b/SOURCES/0021-Ticket-49540-Fix-compiler-warning-in-ldif2ldbm.patch deleted file mode 100644 index d212998..0000000 --- a/SOURCES/0021-Ticket-49540-Fix-compiler-warning-in-ldif2ldbm.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 9ab11ef54c378772982ef65cba3ea6718942899c Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Tue, 15 Jan 2019 13:55:18 -0500 -Subject: [PATCH] Ticket 49540 - FIx compiler warning in ldif2ldbm - -https://pagure.io/389-ds-base/issue/49540 - -Reviewed by: mreynolds(one line commit rule) - -(cherry picked from commit 58be90b8bf96a7a0e10740b122035ea03fa13e0f) -(cherry picked from commit c9580477ffe22a08c0094378e81a6927d0dc4ffc) ---- - ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -index 69a2af9cf..11c020af0 100644 ---- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c -@@ -713,7 +713,7 @@ ldbm_back_ldif2ldbm(Slapi_PBlock *pb) - uint64_t refcnt; - refcnt = slapi_counter_get_value(inst->inst_ref_count); - if (refcnt > 0) { -- slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_ldif2ldbm", "ldbm: '%s' there are %d pending operation(s)." -+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_ldif2ldbm", "ldbm: '%s' there are %" PRIu64 " pending operation(s)." 
- " Import can not proceed until they are completed.\n", - inst->inst_name, - refcnt); --- -2.17.2 - diff --git a/SOURCES/0021-Ticket-50013-Log-warn-instead-of-ERR-when-aci-target.patch b/SOURCES/0021-Ticket-50013-Log-warn-instead-of-ERR-when-aci-target.patch new file mode 100644 index 0000000..377268a --- /dev/null +++ b/SOURCES/0021-Ticket-50013-Log-warn-instead-of-ERR-when-aci-target.patch @@ -0,0 +1,42 @@ +From 11ec6a5bb6978f379206de5f2df9fc7c79a9a1c8 Mon Sep 17 00:00:00 2001 +From: German Parente +Date: Tue, 6 Nov 2018 18:46:42 +0100 +Subject: [PATCH] Ticket 50013 - Log warn instead of ERR when aci target does + not exist. + +Bug Description: + +This is something we have very often in IPA context and customers are very often asking why there are errors in the logs: + +[31/Oct/2018:05:52:23.436616394 -0400] - ERR - NSACLPlugin - acl_parse - The ACL target cn=groups,cn=compat,dc=cgparente,dc=local does not exist +[31/Oct/2018:05:52:23.438951763 -0400] - ERR - NSACLPlugin - acl_parse - The ACL target cn=computers,cn=compat,dc=cgparente,dc=local does not exist + +Fix Description: + +just log WARN instead of ERR + +https://pagure.io/389-ds-base/issue/50013 + +Author: German Parente + +Review by: ??? 
+--- + ldap/servers/plugins/acl/aclparse.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/acl/aclparse.c b/ldap/servers/plugins/acl/aclparse.c +index a425adbd8..906c1d099 100644 +--- a/ldap/servers/plugins/acl/aclparse.c ++++ b/ldap/servers/plugins/acl/aclparse.c +@@ -147,7 +147,7 @@ acl_parse(Slapi_PBlock *pb, char *str, aci_t *aci_item, char **errbuf) + slapi_search_internal_pb(temppb); + slapi_pblock_get(temppb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + if (rc != LDAP_SUCCESS) { +- slapi_log_err(SLAPI_LOG_ERR, plugin_name, ++ slapi_log_err(SLAPI_LOG_WARNING, plugin_name, + "acl_parse - The ACL target %s does not exist\n", slapi_sdn_get_dn(&targdn)); + } + +-- +2.17.2 + diff --git a/SOURCES/0022-Ticket-49997-RFE-ds-replcheck-could-validate-suffix-.patch b/SOURCES/0022-Ticket-49997-RFE-ds-replcheck-could-validate-suffix-.patch new file mode 100644 index 0000000..aec4eb6 --- /dev/null +++ b/SOURCES/0022-Ticket-49997-RFE-ds-replcheck-could-validate-suffix-.patch @@ -0,0 +1,82 @@ +From d116dd899b4ad64b0ab14f3e153e76a95f54937e Mon Sep 17 00:00:00 2001 +From: German Parente +Date: Sun, 28 Oct 2018 16:29:09 +0100 +Subject: [PATCH] Ticket 49997 RFE: ds-replcheck could validate suffix exists + and it's replicated + +Bug Description: + +seen at customer site, as first request to ldap database is the RUV, if the suffix provided in the command line does not exist or it's not replicated, we have an error message that it's regarding the RUV: + +ds-replcheck -D "cn=directory manager" -w secret12 -b "o=ipaca" -r ldap://ipamaster.germanparente.local:389 -m ldap://ipareplica.germanparente.local +Performing online report... +Connecting to servers... +Gathering Master's RUV... +Error: Failed to get Master RUV entry: {'desc': 'No such object'} + +Fix Description: + +add function to validate suffix exists and it's replicated + +https://pagure.io/389-ds-base/issue/49997 + +Author: German Parente + +Review by: ??? 
+--- + ldap/admin/src/scripts/ds-replcheck | 32 +++++++++++++++++++++++++++++ + 1 file changed, 32 insertions(+) + +diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck +index e18465dc0..57748b09f 100755 +--- a/ldap/admin/src/scripts/ds-replcheck ++++ b/ldap/admin/src/scripts/ds-replcheck +@@ -816,6 +816,30 @@ def check_for_diffs(mentries, mglue, rentries, rglue, report, opts): + + return report + ++def validate_suffix(ldapnode, suffix, hostname): ++ # Validate suffix exists ++ try: ++ master_basesuffix = ldapnode.search_s(suffix, ldap.SCOPE_BASE ) ++ except ldap.NO_SUCH_OBJECT: ++ print("Error: Failed to validate suffix in {}. {} does not exist.".format(hostname, suffix)) ++ return False ++ except ldap.LDAPError as e: ++ print("Error: failed to validate suffix in {} ({}). ".format(hostname, str(e))) ++ return False ++ ++ # Check suffix is replicated ++ try: ++ replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix ++ master_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter) ++ if (len(master_replica) != 1): ++ print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix)) ++ return False ++ except ldap.LDAPError as e: ++ print("Error: failed to validate suffix in {} ({}). 
".format(hostname, str(e))) ++ return False ++ ++ return True ++ + + def connect_to_replicas(opts): + ''' Start the paged results searches +@@ -888,6 +912,14 @@ def connect_to_replicas(opts): + "Please check your credentials and LDAP urls are correct.".format(str(e))) + exit(1) + ++ # Validate suffix ++ print ("Validating suffix ...") ++ if not validate_suffix(master, opts['suffix'], opts['mhost']): ++ exit(1) ++ ++ if not validate_suffix(replica,opts['suffix'], opts['rhost']): ++ exit(1) ++ + # Get the RUVs + print ("Gathering Master's RUV...") + try: +-- +2.17.2 + diff --git a/SOURCES/0022-Ticket-50078-cannot-add-cenotaph-in-read-only-consum.patch b/SOURCES/0022-Ticket-50078-cannot-add-cenotaph-in-read-only-consum.patch deleted file mode 100644 index 6a80c19..0000000 --- a/SOURCES/0022-Ticket-50078-cannot-add-cenotaph-in-read-only-consum.patch +++ /dev/null @@ -1,108 +0,0 @@ -From 7b4cb7aebdf5264e12e4ffad96fd21b3d7d2a14f Mon Sep 17 00:00:00 2001 -From: Ludwig Krispenz -Date: Tue, 11 Dec 2018 11:06:44 +0100 -Subject: [PATCH] Ticket 50078 - cannot add cenotaph in read only consumer - -Bug: For modrdn operations a cenotaph entry is created to be used in later conflict - resolution procedures, this is done by an internal add operation and - fails on hubs and consumers - -Fix: Add the "bypass referral" flag to the internal add operation to allow it - -Reviewed by: Thierry, thanks ---- - dirsrvtests/tests/tickets/ticket50078_test.py | 68 +++++++++++++++++++ - ldap/servers/plugins/replication/urp.c | 2 +- - 2 files changed, 69 insertions(+), 1 deletion(-) - create mode 100644 dirsrvtests/tests/tickets/ticket50078_test.py - -diff --git a/dirsrvtests/tests/tickets/ticket50078_test.py b/dirsrvtests/tests/tickets/ticket50078_test.py -new file mode 100644 -index 000000000..3f6c5ec2d ---- /dev/null -+++ b/dirsrvtests/tests/tickets/ticket50078_test.py -@@ -0,0 +1,68 @@ -+import pytest -+from lib389.utils import * -+from lib389.topologies import topology_m1h1c1 -+from 
lib389.idm.user import UserAccounts -+ -+from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties, -+ REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, -+ REPLICATION_TRANSPORT, SUFFIX, RA_NAME, RA_BINDDN, RA_BINDPW, -+ RA_METHOD, RA_TRANSPORT_PROT, SUFFIX) -+ -+logging.getLogger(__name__).setLevel(logging.DEBUG) -+log = logging.getLogger(__name__) -+ -+TEST_USER = "test_user" -+ -+def test_ticket50078(topology_m1h1c1): -+ """ -+ Test that for a MODRDN operation the cenotaph entry is created on -+ a hub or consumer. -+ """ -+ -+ M1 = topology_m1h1c1.ms["master1"] -+ H1 = topology_m1h1c1.hs["hub1"] -+ C1 = topology_m1h1c1.cs["consumer1"] -+ # -+ # Test replication is working -+ # -+ if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): -+ log.info('Replication is working.') -+ else: -+ log.fatal('Replication is not working.') -+ assert False -+ -+ ua = UserAccounts(M1, DEFAULT_SUFFIX) -+ ua.create(properties={ -+ 'uid': "%s%d" % (TEST_USER, 1), -+ 'cn' : "%s%d" % (TEST_USER, 1), -+ 'sn' : 'user', -+ 'uidNumber' : '1000', -+ 'gidNumber' : '2000', -+ 'homeDirectory' : '/home/testuser' -+ }) -+ -+ user = ua.get('%s1' % TEST_USER) -+ log.info(" Rename the test entry %s..." 
% user) -+ user.rename('uid=test_user_new') -+ -+ # wait until replication is in sync -+ if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): -+ log.info('Replication is working.') -+ else: -+ log.fatal('Replication is not working.') -+ assert False -+ -+ # check if cenotaph was created on hub and consumer -+ ents = H1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") -+ assert len(ents) == 1 -+ -+ ents = C1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") -+ assert len(ents) == 1 -+ -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ # -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main("-s %s" % CURRENT_FILE) -diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c -index 11c5da7cf..37fe77379 100644 ---- a/ldap/servers/plugins/replication/urp.c -+++ b/ldap/servers/plugins/replication/urp.c -@@ -911,7 +911,7 @@ urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn) - cenotaph, - NULL, - repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), -- OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY); -+ OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY|SLAPI_OP_FLAG_BYPASS_REFERRALS); - slapi_add_internal_pb(add_pb); - slapi_pblock_get(add_pb, SLAPI_PLUGIN_INTOP_RESULT, &ret); - --- -2.17.2 - diff --git a/SOURCES/0023-Ticket-50177-import-task-should-not-be-deleted-too-r.patch b/SOURCES/0023-Ticket-50177-import-task-should-not-be-deleted-too-r.patch deleted file mode 100644 index ab57e52..0000000 --- a/SOURCES/0023-Ticket-50177-import-task-should-not-be-deleted-too-r.patch +++ /dev/null @@ -1,85 +0,0 @@ -From aea4494eeb5351bcc26bf5e15411c28b96648445 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Fri, 1 Feb 2019 15:36:01 +0100 -Subject: [PATCH] Ticket 50177 - import task should not be deleted too rapidely - after import finishes to be able to query 
the status - -Bug Description: - scripts that create online import and export tasks do not define a Time To Life of the tasks. - As a consequence the task entry is cleared 2min (default value) after task completion. - This is too rapid and some admin scripts may miss the final task status. - -Fix Description: - The fix is to keep the entry of completed online import and export tasks for 1 day. - It also allows defines a default TTL to 1h (instead of 2min) - -https://pagure.io/389-ds-base/issue/50177 - -Reviewed by: Mark Reynolds - -Platforms tested: F27 - -Flag Day: no - -Doc impact: no ---- - ldap/admin/src/scripts/db2ldif.pl.in | 3 ++- - ldap/admin/src/scripts/ldif2db.pl.in | 3 ++- - ldap/servers/slapd/task.c | 6 +++--- - 3 files changed, 7 insertions(+), 5 deletions(-) - -diff --git a/ldap/admin/src/scripts/db2ldif.pl.in b/ldap/admin/src/scripts/db2ldif.pl.in -index 0d220f00a..f7d12b48a 100644 ---- a/ldap/admin/src/scripts/db2ldif.pl.in -+++ b/ldap/admin/src/scripts/db2ldif.pl.in -@@ -241,7 +241,8 @@ if ($decrypt_on_export != 0) { $nsexportdecrypt = "nsExportDecrypt: true\n"; } - $nsprintkey = ""; - if ($printkey == 0) { $nsprintkey = "nsPrintKey: false\n"; } - $nsldiffile = "nsFilename: ${ldiffile}\n"; --$entry = "${dn}${misc}${cn}${nsinstance}${nsincluded}${nsexcluded}${nsreplica}${nsnobase64}${nsnowrap}${nsnoversion}${nsnouniqueid}${nsuseid2entry}${nsonefile}${nsexportdecrypt}${nsprintkey}${nsldiffile}"; -+$ttl = "ttl: 86400"; -+$entry = "${dn}${misc}${cn}${nsinstance}${nsincluded}${nsexcluded}${nsreplica}${nsnobase64}${nsnowrap}${nsnoversion}${nsnouniqueid}${nsuseid2entry}${nsonefile}${nsexportdecrypt}${nsprintkey}${nsldiffile}${ttl}"; - - print("Exporting to ldif file: ${ldiffile}\n"); - $rc = DSUtil::ldapmod($entry, %info); -diff --git a/ldap/admin/src/scripts/ldif2db.pl.in b/ldap/admin/src/scripts/ldif2db.pl.in -index a5d834f8e..486dcd053 100644 ---- a/ldap/admin/src/scripts/ldif2db.pl.in -+++ b/ldap/admin/src/scripts/ldif2db.pl.in -@@ -192,7 +192,8 @@ 
$nsmergechunksiz = "nsImportChunkSize: ${mergechunksiz}\n"; - $nsgenuniqid = "nsUniqueIdGenerator: ${genuniqid}\n"; - $nsuniqidname = ""; - if ($uniqidname ne "") { $nsuniqidname = "nsUniqueIdGeneratorNamespace: ${uniqidname}\n"; } --$entry = "${dn}${misc}${cn}${nsinstance}${nsincluded}${nsexcluded}${nsldiffiles}${nsnoattrindexes}${nsimportencrypt}${nsmergechunksiz}${nsgenuniqid}${nsuniqidname}"; -+$ttl = "ttl: 86400"; -+$entry = "${dn}${misc}${cn}${nsinstance}${nsincluded}${nsexcluded}${nsldiffiles}${nsnoattrindexes}${nsimportencrypt}${nsmergechunksiz}${nsgenuniqid}${nsuniqidname}${ttl}"; - - $rc = DSUtil::ldapmod($entry, %info); - -diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c -index 698ee19b9..8c48c05b8 100644 ---- a/ldap/servers/slapd/task.c -+++ b/ldap/servers/slapd/task.c -@@ -46,7 +46,7 @@ static int shutting_down = 0; - #define TASK_PROGRESS_NAME "nsTaskCurrentItem" - #define TASK_WORK_NAME "nsTaskTotalItems" - --#define DEFAULT_TTL "120" /* seconds */ -+#define DEFAULT_TTL "3600" /* seconds */ - #define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */ - #define TASK_SYSCONFIG_LOGCHANGES_ATTR "logchanges" - #define TASK_TOMBSTONE_FIXUP "fixup tombstones task" -@@ -387,8 +387,8 @@ slapi_task_status_changed(Slapi_Task *task) - if (e == NULL) - return; - ttl = atoi(fetch_attr(e, "ttl", DEFAULT_TTL)); -- if (ttl > 3600) -- ttl = 3600; /* be reasonable. 
*/ -+ if (ttl > (24*3600)) -+ ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ - expire = time(NULL) + ttl; - task->task_flags |= SLAPI_TASK_DESTROYING; - /* queue an event to destroy the state info */ --- -2.17.2 - diff --git a/SOURCES/0023-Ticket-50363-ds-replcheck-incorrectly-reports-error-.patch b/SOURCES/0023-Ticket-50363-ds-replcheck-incorrectly-reports-error-.patch new file mode 100644 index 0000000..1d17485 --- /dev/null +++ b/SOURCES/0023-Ticket-50363-ds-replcheck-incorrectly-reports-error-.patch @@ -0,0 +1,49 @@ +From 86bbfb42aa03a7dd1ea0a9faa130d3cdb31aab5e Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 13 May 2019 10:03:58 -0400 +Subject: [PATCH] Ticket 50363 - ds-replcheck incorrectly reports error out of + order multi-valued attributes + +Bug Description: If for some reason an entry's multi-valued attribute + values are in different orders on different replicas + the tool reports this as an inconsistency when it is + not. + +Fix Description: For both offline & online processing sort each entry's + multi-valued attribute values. + +https://pagure.io/389-ds-base/issue/50363 + +Reviewed by: firstyear & mhonek (Thanks!!) 
+--- + ldap/admin/src/scripts/ds-replcheck | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck +index 57748b09f..a9411cb4f 100755 +--- a/ldap/admin/src/scripts/ds-replcheck ++++ b/ldap/admin/src/scripts/ds-replcheck +@@ -110,7 +110,7 @@ def convert_entries(entries): + + for entry in entries: + new_entry = Entry(entry) +- new_entry.data = {k.lower(): v for k, v in list(new_entry.data.items())} ++ new_entry.data = {k.lower(): sorted(v) for k, v in list(new_entry.data.items())} + if new_entry.dn.endswith("cn=mapping tree,cn=config"): + '''Skip replica entry (ldapsearch brings this in because the filter + we use triggers an internal operation to return the config entry - so +@@ -381,6 +381,11 @@ def ldif_search(LDIF, dn): + # Keep track of entry index - we use this later when searching the LDIF again + result['idx'] = count + ++ # Sort all the multi-valued attributes ++ for k, v in data.items(): ++ v.sort() ++ data[k] = v ++ + result['glue'] = None + if found_conflict and found_subentry and found_tombstone is False: + result['entry'] = None +-- +2.17.2 + diff --git a/SOURCES/0024-Ticket-50370-CleanAllRUV-task-crashing-during-server.patch b/SOURCES/0024-Ticket-50370-CleanAllRUV-task-crashing-during-server.patch new file mode 100644 index 0000000..ff0c5ef --- /dev/null +++ b/SOURCES/0024-Ticket-50370-CleanAllRUV-task-crashing-during-server.patch @@ -0,0 +1,73 @@ +From 5bed4704f3fa6fc6fca1cad9f132985cc7f68056 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 14 May 2019 13:58:42 -0400 +Subject: [PATCH] Ticket 50370 - CleanAllRUV task crashing during server + shutdown + +Description: There is a race condition during server shutdown that + can cause the server to crash. Increment the active + thread count for each cleaning task to prevent the plugins + from being closed before the thread terminates. 
+ +https://pagure.io/389-ds-base/issue/50370 + +Reviewed by: firstyear(Thanks!) + +(cherry picked from commit 7141b8d10382e8dcb8528b57e5226c82506b79b9) +--- + .../plugins/replication/repl5_replica_config.c | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c +index a952b687d..b4aff9eb4 100644 +--- a/ldap/servers/plugins/replication/repl5_replica_config.c ++++ b/ldap/servers/plugins/replication/repl5_replica_config.c +@@ -1630,9 +1630,13 @@ replica_cleanallruv_thread(void *arg) + int aborted = 0; + int rc = 0; + +- if (!data) { ++ if (!data || slapi_is_shutting_down()) { + return; /* no data */ + } ++ ++ /* Increase active thread count to prevent a race condition at server shutdown */ ++ g_incr_active_threadcnt(); ++ + if (data->task) { + slapi_task_inc_refcount(data->task); + slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name, +@@ -1912,6 +1916,7 @@ done: + slapi_ch_free_string(&data->force); + slapi_ch_free_string(&rid_text); + slapi_ch_free((void **)&data); ++ g_decr_active_threadcnt(); + } + + /* +@@ -3005,9 +3010,13 @@ replica_abort_task_thread(void *arg) + int release_it = 0; + int count = 0, rc = 0; + +- if (!data) { ++ if (!data || slapi_is_shutting_down()) { + return; /* no data */ + } ++ ++ /* Increase active thread count to prevent a race condition at server shutdown */ ++ g_incr_active_threadcnt(); ++ + if (data->task) { + slapi_task_inc_refcount(data->task); + slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name, "replica_abort_task_thread --> refcount incremented.\n"); +@@ -3134,6 +3143,7 @@ done: + slapi_ch_free_string(&data->certify); + slapi_sdn_free(&data->sdn); + slapi_ch_free((void **)&data); ++ g_decr_active_threadcnt(); + } + + static int +-- +2.17.2 + diff --git a/SOURCES/0024-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch 
b/SOURCES/0024-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch deleted file mode 100644 index 6cd2f51..0000000 --- a/SOURCES/0024-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 6984b34fe496d696e37f003dbf57a5ca1b5899ea Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Mon, 20 May 2019 15:06:54 -0400 -Subject: [PATCH] Ticket 50396 - Crash in PAM plugin when user does not exist - -Description: pam passthru & addn plugin causes crash in bind when - user does not exist. Need to make sure we don't - dereference NULL pointer. - -https://pagure.io/389-ds-base/issue/50396 - -Reviewed by: mreynolds & tbordaz - -(cherry picked from commit 0935b8af6c8925c7a79a0a22103142ef5f7c5960) ---- - ldap/servers/plugins/pam_passthru/pam_ptpreop.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c -index de9448b90..b62c3c6b6 100644 ---- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c -+++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c -@@ -436,8 +436,9 @@ pam_passthru_bindpreop(Slapi_PBlock *pb) - * We only handle simple bind requests that include non-NULL binddn and - * credentials. Let the Directory Server itself handle everything else. 
- */ -- if ((method != LDAP_AUTH_SIMPLE) || (*normbinddn == '\0') || -- (creds->bv_len == 0)) { -+ if (method != LDAP_AUTH_SIMPLE || normbinddn == NULL || -+ *normbinddn == '\0' || creds->bv_len == 0) -+ { - slapi_log_err(SLAPI_LOG_PLUGIN, PAM_PASSTHRU_PLUGIN_SUBSYSTEM, - "pam_passthru_bindpreop - Not handled (not simple bind or NULL dn/credentials)\n"); - return retcode; --- -2.21.0 - diff --git a/SOURCES/0025-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch b/SOURCES/0025-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch deleted file mode 100644 index bae0073..0000000 --- a/SOURCES/0025-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch +++ /dev/null @@ -1,186 +0,0 @@ -From bc9ae5a810b8024e7ab1179f492c425793e0ddcf Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 7 Jun 2019 09:21:31 -0400 -Subject: [PATCH] Issue 50426 - nsSSL3Ciphers is limited to 1024 characters - -Bug Description: There was a hardcoded buffer for processing TLS ciphers. - Anything over 1024 characters was truncated and was not - applied. - -Fix Description: Don't use a fixed size buffer and just use the entire - string. When printing errors about invalid format then - we must use a fixed sized buffer, but we will truncate - that log value as to not exceed the ssl logging function's - buffer, and still output a useful message. - -ASAN approved - -https://pagure.io/389-ds-base/issue/50426 - -Reviewed by: firstyear, tbordaz, and spichugi (Thanks!!!) 
- -(cherry picked from commit 22f2f9a1502e63bb169b7d599b5a3b35ddb31b8a) ---- - dirsrvtests/tests/suites/tls/cipher_test.py | 51 +++++++++++++++++++++ - ldap/servers/slapd/ssl.c | 34 ++++++-------- - 2 files changed, 66 insertions(+), 19 deletions(-) - create mode 100644 dirsrvtests/tests/suites/tls/cipher_test.py - -diff --git a/dirsrvtests/tests/suites/tls/cipher_test.py b/dirsrvtests/tests/suites/tls/cipher_test.py -new file mode 100644 -index 000000000..058931046 ---- /dev/null -+++ b/dirsrvtests/tests/suites/tls/cipher_test.py -@@ -0,0 +1,51 @@ -+import pytest -+import os -+from lib389.config import Encryption -+from lib389.topologies import topology_st as topo -+ -+ -+def test_long_cipher_list(topo): -+ """Test a long cipher list, and makre sure it is not truncated -+ -+ :id: bc400f54-3966-49c8-b640-abbf4fb2377d -+ :setup: Standalone Instance -+ :steps: -+ 1. Set nsSSL3Ciphers to a very long list of ciphers -+ 2. Ciphers are applied correctly -+ :expectedresults: -+ 1. Success -+ 2. 
Success -+ """ -+ ENABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384::AES-GCM::AEAD::256" -+ DISABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256::AES-GCM::AEAD::128" -+ CIPHER_LIST = ( -+ "-all,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5," -+ "-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," -+ "-TLS_RSA_WITH_RC4_128_MD5,-TLS_RSA_WITH_RC4_128_SHA,-TLS_RSA_WITH_3DES_EDE_CBC_SHA," -+ "-TLS_RSA_WITH_DES_CBC_SHA,-SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,-SSL_RSA_FIPS_WITH_DES_CBC_SHA," -+ "-TLS_RSA_EXPORT_WITH_RC4_40_MD5,-TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,-TLS_RSA_WITH_NULL_MD5," -+ "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA," -+ "-SSL_FORTEZZA_DMS_WITH_RC4_128_SHA,-SSL_FORTEZZA_DMS_WITH_NULL_SHA,-TLS_DHE_DSS_WITH_DES_CBC_SHA," -+ "-TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,-TLS_DHE_RSA_WITH_DES_CBC_SHA,-TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA," -+ "+TLS_RSA_WITH_AES_128_CBC_SHA,-TLS_DHE_DSS_WITH_AES_128_CBC_SHA,-TLS_DHE_RSA_WITH_AES_128_CBC_SHA," -+ "+TLS_RSA_WITH_AES_256_CBC_SHA,-TLS_DHE_DSS_WITH_AES_256_CBC_SHA,-TLS_DHE_RSA_WITH_AES_256_CBC_SHA," -+ "-TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,-TLS_DHE_DSS_WITH_RC4_128_SHA,-TLS_ECDHE_RSA_WITH_RC4_128_SHA," -+ "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," -+ "-TLS_RSA_WITH_RC4_128_MD5,-TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,-TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA," -+ "-TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,+TLS_AES_128_GCM_SHA256,+TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" -+ ) -+ -+ topo.standalone.enable_tls() -+ enc = Encryption(topo.standalone) -+ enc.set('nsSSL3Ciphers', CIPHER_LIST) -+ topo.standalone.restart() -+ enabled_ciphers = enc.get_attr_vals_utf8('nssslenabledciphers') -+ assert ENABLED_CIPHER in enabled_ciphers -+ assert DISABLED_CIPHER not in enabled_ciphers -+ -+ -+if __name__ == '__main__': -+ # Run isolated -+ 
# -s for DEBUG mode -+ CURRENT_FILE = os.path.realpath(__file__) -+ pytest.main(["-s", CURRENT_FILE]) -diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c -index b8eba2da4..ed054db44 100644 ---- a/ldap/servers/slapd/ssl.c -+++ b/ldap/servers/slapd/ssl.c -@@ -95,7 +95,6 @@ static char *configDN = "cn=encryption,cn=config"; - #define CIPHER_SET_ALLOWWEAKDHPARAM 0x200 /* allowWeakDhParam is on */ - #define CIPHER_SET_DISALLOWWEAKDHPARAM 0x400 /* allowWeakDhParam is off */ - -- - #define CIPHER_SET_ISDEFAULT(flag) \ - (((flag)&CIPHER_SET_DEFAULT) ? PR_TRUE : PR_FALSE) - #define CIPHER_SET_ISALL(flag) \ -@@ -689,10 +688,12 @@ _conf_setciphers(char *setciphers, int flags) - active = 0; - break; - default: -- PR_snprintf(err, sizeof(err), "invalid ciphers <%s>: format is " -- "+cipher1,-cipher2...", -- raw); -- return slapi_ch_strdup(err); -+ if (strlen(raw) > MAGNUS_ERROR_LEN) { -+ PR_snprintf(err, sizeof(err) - 3, "%s...", raw); -+ return slapi_ch_smprintf("invalid ciphers <%s>: format is +cipher1,-cipher2...", err); -+ } else { -+ return slapi_ch_smprintf("invalid ciphers <%s>: format is +cipher1,-cipher2...", raw); -+ } - } - if ((t = strchr(setciphers, ','))) - *t++ = '\0'; -@@ -1689,7 +1690,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) - PRUint16 NSSVersionMax = enabledNSSVersions.max; - char mymin[VERSION_STR_LENGTH], mymax[VERSION_STR_LENGTH]; - char newmax[VERSION_STR_LENGTH]; -- char cipher_string[1024]; - int allowweakcipher = CIPHER_SET_DEFAULTWEAKCIPHER; - int_fast16_t renegotiation = (int_fast16_t)SSL_RENEGOTIATE_REQUIRES_XTN; - -@@ -1730,21 +1730,17 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) - "Ignoring it and set it to default.", val, configDN); - } - } -- slapi_ch_free((void **)&val); -+ slapi_ch_free_string(&val); - - /* Set SSL cipher preferences */ -- *cipher_string = 0; -- if (ciphers && (*ciphers) && PL_strcmp(ciphers, "blank")) -- PL_strncpyz(cipher_string, ciphers, sizeof(cipher_string)); -- slapi_ch_free((void 
**)&ciphers); -- -- if (NULL != (val = _conf_setciphers(cipher_string, allowweakcipher))) { -+ if (NULL != (val = _conf_setciphers(ciphers, allowweakcipher))) { - errorCode = PR_GetError(); - slapd_SSL_warn("Failed to set SSL cipher " - "preference information: %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", - val, errorCode, slapd_pr_strerror(errorCode)); -- slapi_ch_free((void **)&val); -+ slapi_ch_free_string(&val); - } -+ slapi_ch_free_string(&ciphers); - freeConfigEntry(&e); - - /* Import pr fd into SSL */ -@@ -1815,12 +1811,12 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) - activation = slapi_entry_attr_get_charptr(e, "nssslactivation"); - if ((!activation) || (!PL_strcasecmp(activation, "off"))) { - /* this family was turned off, goto next */ -- slapi_ch_free((void **)&activation); -+ slapi_ch_free_string(&activation); - freeConfigEntry(&e); - continue; - } - -- slapi_ch_free((void **)&activation); -+ slapi_ch_free_string(&activation); - - token = slapi_entry_attr_get_charptr(e, "nsssltoken"); - personality = slapi_entry_attr_get_charptr(e, "nssslpersonalityssl"); -@@ -1837,8 +1833,8 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) - "family information. 
Missing nsssltoken or" - "nssslpersonalityssl in %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", - *family, errorCode, slapd_pr_strerror(errorCode)); -- slapi_ch_free((void **)&token); -- slapi_ch_free((void **)&personality); -+ slapi_ch_free_string(&token); -+ slapi_ch_free_string(&personality); - freeConfigEntry(&e); - continue; - } -@@ -1865,7 +1861,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) - "private key for cert %s of family %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", - cert_name, *family, - errorCode, slapd_pr_strerror(errorCode)); -- slapi_ch_free((void **)&personality); -+ slapi_ch_free_string(&personality); - CERT_DestroyCertificate(cert); - cert = NULL; - freeConfigEntry(&e); --- -2.21.0 - diff --git a/SOURCES/0025-Ticket-50378-ACI-s-with-IPv4-and-IPv6-bind-rules-do-.patch b/SOURCES/0025-Ticket-50378-ACI-s-with-IPv4-and-IPv6-bind-rules-do-.patch new file mode 100644 index 0000000..b4d8c86 --- /dev/null +++ b/SOURCES/0025-Ticket-50378-ACI-s-with-IPv4-and-IPv6-bind-rules-do-.patch @@ -0,0 +1,47 @@ +From 7264a239b71b4b5adc6740457586520ad0ba1d3f Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 15 May 2019 16:04:55 -0400 +Subject: [PATCH] Ticket 50378 - ACI's with IPv4 and IPv6 bind rules do not + work for IPv6 clients + +Description: When the client is a IPv6 client, any ACI's that contain bind rules + for IPv4 addresses essentially break that aci causing it to not be + fully evaluated. + + For example we have an aci like this: + + aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow( + read,search,compare) userdn="ldap:///anyone" and + (ip="127.0.0.1" or ip="2620:52:0:84:f816:3eff:fe4b:4f35");) + + So when the client is IPv6 we start processing the IP addresses in + the ACI, as soon as a IPv4 address is found the ACI evaluation stops + and in this case the IPv6 address is never checked and access is denied. 
+ + The problem is that we set the wrong return code variable in libaccess + +https://pagure.io/389-ds-base/issue/50378 + +Reviewed by: mreynolds (one line commit rule) + +(cherry picked from commit 41c30fd557d4cc0aaaf8a9f7767d37746f4c4bc4) +--- + lib/libaccess/lasip.cpp | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/libaccess/lasip.cpp b/lib/libaccess/lasip.cpp +index eea7affba..30c546df7 100644 +--- a/lib/libaccess/lasip.cpp ++++ b/lib/libaccess/lasip.cpp +@@ -598,7 +598,7 @@ int LASIpEval(NSErr_t *errp, char *attr_name, CmpOp_t comparator, + + node = context->treetop_ipv6; + if ( node == NULL ) { +- retcode = (comparator == CMP_OP_EQ ? LAS_EVAL_FALSE : LAS_EVAL_TRUE); ++ rc = (comparator == CMP_OP_EQ ? LAS_EVAL_FALSE : LAS_EVAL_TRUE); + } else { + addr = PR_ntohs( ipv6->_S6_un._S6_u16[field]); + for (bit = 127; bit >= 0 ; bit--, bit_position--) { +-- +2.17.2 + diff --git a/SOURCES/0026-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch b/SOURCES/0026-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch deleted file mode 100644 index 0ad1c92..0000000 --- a/SOURCES/0026-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 4383a6cb666bc79feb9ca0ee62dda0520955d286 Mon Sep 17 00:00:00 2001 -From: Thierry Bordaz -Date: Wed, 15 May 2019 17:46:14 +0200 -Subject: [PATCH] Ticket 50329 - (2nd) Possible Security Issue: DOS due to - ioblocktimeout not applying to TLS - -Bug Description: - A secure socket is configured in blocking mode. If an event - is detected on a secure socket a worker tries to receive the request. - If handshake occurs during the read, it can hang longer than - ioblocktimeout because it takes into account the socket option - rather than the timeout used for the ssl_Recv - -Fix Description: - The fix is specific to secure socket and set this socket option - to do non blocking IO. - -https://pagure.io/389-ds-base/issue/50329 - -Reviewed by: ? 
- -Platforms tested: F28, RHEL7.6 - -Flag Day: no - -Doc impact: no ---- - ldap/servers/slapd/daemon.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c -index c77e1f15c..b1d41c858 100644 ---- a/ldap/servers/slapd/daemon.c -+++ b/ldap/servers/slapd/daemon.c -@@ -3191,7 +3191,7 @@ configure_pr_socket(PRFileDesc **pr_socket, int secure, int local) - - if (secure) { - pr_socketoption.option = PR_SockOpt_Nonblocking; -- pr_socketoption.value.non_blocking = 0; -+ pr_socketoption.value.non_blocking = 1; - if (PR_SetSocketOption(*pr_socket, &pr_socketoption) == PR_FAILURE) { - PRErrorCode prerr = PR_GetError(); - slapi_log_err(SLAPI_LOG_ERR, --- -2.21.0 - diff --git a/SOURCES/0026-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch b/SOURCES/0026-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch new file mode 100644 index 0000000..a31a89a --- /dev/null +++ b/SOURCES/0026-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch @@ -0,0 +1,37 @@ +From 44b6f98bc687f3ec3ae2c40b8086f1bd4936b827 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 20 May 2019 15:06:54 -0400 +Subject: [PATCH] Ticket 50396 - Crash in PAM plugin when user does not exist + +Description: pam passthru & addn plugin causes crash in bind when + user does not exist. Need to make sure we don't + dereference NULL pointer. 
+ +https://pagure.io/389-ds-base/issue/50396 + +Reviewed by: mreynolds & tbordaz + +(cherry picked from commit 0935b8af6c8925c7a79a0a22103142ef5f7c5960) +--- + ldap/servers/plugins/pam_passthru/pam_ptpreop.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c +index de9448b90..b62c3c6b6 100644 +--- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c ++++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c +@@ -436,8 +436,9 @@ pam_passthru_bindpreop(Slapi_PBlock *pb) + * We only handle simple bind requests that include non-NULL binddn and + * credentials. Let the Directory Server itself handle everything else. + */ +- if ((method != LDAP_AUTH_SIMPLE) || (*normbinddn == '\0') || +- (creds->bv_len == 0)) { ++ if (method != LDAP_AUTH_SIMPLE || normbinddn == NULL || ++ *normbinddn == '\0' || creds->bv_len == 0) ++ { + slapi_log_err(SLAPI_LOG_PLUGIN, PAM_PASSTHRU_PLUGIN_SUBSYSTEM, + "pam_passthru_bindpreop - Not handled (not simple bind or NULL dn/credentials)\n"); + return retcode; +-- +2.17.2 + diff --git a/SOURCES/0027-BZ1518320-entry-cache-crash-fix.patch b/SOURCES/0027-BZ1518320-entry-cache-crash-fix.patch deleted file mode 100644 index 062f943..0000000 --- a/SOURCES/0027-BZ1518320-entry-cache-crash-fix.patch +++ /dev/null @@ -1,703 +0,0 @@ -From 17aada4feb87407e004a890225700e730778d692 Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Thu, 20 Jun 2019 15:50:08 -0400 -Subject: [PATCH 1/2] BZ1518320 - entry cache crash fix - -Description: THis patch is combination of all the entry cache fixes. - - If these fixes are not enough, there is an experimental - "fix" that should prevent the crash. A message will be - logged that reports the crash was averted: - - "(avoided crash, but cache was corrupted)" - - The customer should monitor the errors log for this text, - and let GSS know if they see it. 
---- - configure.ac | 3 - - dirsrvtests/tests/suites/betxns/betxn_test.py | 57 ++++++ - ldap/servers/slapd/back-ldbm/back-ldbm.h | 68 ++++---- - ldap/servers/slapd/back-ldbm/backentry.c | 2 +- - ldap/servers/slapd/back-ldbm/cache.c | 163 ++++++++++++++++-- - ldap/servers/slapd/back-ldbm/ldbm_add.c | 13 ++ - ldap/servers/slapd/back-ldbm/ldbm_delete.c | 12 ++ - ldap/servers/slapd/back-ldbm/ldbm_modify.c | 12 ++ - ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 22 ++- - .../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 + - ldap/servers/slapd/slapi-plugin.h | 15 ++ - ldap/servers/slapd/time.c | 26 +++ - 12 files changed, 341 insertions(+), 53 deletions(-) - -diff --git a/configure.ac b/configure.ac -index 91d6d398b..ea528ff2b 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -72,9 +72,6 @@ AC_FUNC_STRFTIME - AC_FUNC_VPRINTF - AC_CHECK_FUNCS([endpwent ftruncate getcwd gethostbyname inet_ntoa localtime_r memmove memset mkdir munmap putenv rmdir setrlimit socket strcasecmp strchr strcspn strdup strerror strncasecmp strpbrk strrchr strstr strtol tzset]) - --# These functions are *required* without option. --AC_CHECK_FUNCS([clock_gettime], [], AC_MSG_ERROR([unable to locate required symbol clock_gettime])) -- - # This will detect if we need to add the LIBADD_DL value for us. 
- LT_LIB_DLLOAD - -diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py -index 175496495..48181a9ea 100644 ---- a/dirsrvtests/tests/suites/betxns/betxn_test.py -+++ b/dirsrvtests/tests/suites/betxns/betxn_test.py -@@ -8,6 +8,7 @@ - # - import pytest - import six -+import ldap - from lib389.tasks import * - from lib389.utils import * - from lib389.topologies import topology_st -@@ -248,6 +249,62 @@ def test_betxn_memberof(topology_st, dynamic_plugins): - log.info('test_betxn_memberof: PASSED') - - -+def test_betxn_modrdn_memberof(topology_st): -+ """Test modrdn operartions and memberOf -+ -+ :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 -+ -+ :setup: Standalone instance -+ -+ :steps: 1. Enable and configure memberOf plugin -+ 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" -+ 3. Create group and user outside of memberOf plugin scope -+ 4. Do modrdn to move group into scope -+ 5. Do modrdn to move group into scope (again) -+ -+ :expectedresults: -+ 1. memberOf plugin plugin should be ON -+ 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" should PASS -+ 3. Creating group and user should PASS -+ 4. Modrdn should fail with objectclass violation -+ 5. 
Second modrdn should also fail with objectclass violation -+ """ -+ -+ peoplebase = 'ou=people,%s' % DEFAULT_SUFFIX -+ memberof = MemberOfPlugin(topology_st.standalone) -+ memberof.enable() -+ memberof.set_autoaddoc('nsContainer') # Bad OC -+ memberof.set('memberOfEntryScope', peoplebase) -+ memberof.set('memberOfAllBackends', 'on') -+ topology_st.standalone.restart() -+ -+ groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) -+ group = groups.create(properties={ -+ 'cn': 'group', -+ }) -+ -+ # Create user and add it to group -+ users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) -+ user = users.create(properties=TEST_USER_PROPERTIES) -+ if not ds_is_older('1.3.7'): -+ user.remove('objectClass', 'nsMemberOf') -+ -+ group.add_member(user.dn) -+ -+ # Attempt modrdn that should fail, but the original entry should stay in the cache -+ with pytest.raises(ldap.OBJECTCLASS_VIOLATION): -+ group.rename('cn=group_to_people', newsuperior=peoplebase) -+ -+ # Should fail, but not with NO_SUCH_OBJECT as the original entry should still be in the cache -+ with pytest.raises(ldap.OBJECTCLASS_VIOLATION): -+ group.rename('cn=group_to_people', newsuperior=peoplebase) -+ -+ # -+ # Done -+ # -+ log.info('test_betxn_modrdn_memberof: PASSED') -+ -+ - if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode -diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h -index 4727961a9..399508561 100644 ---- a/ldap/servers/slapd/back-ldbm/back-ldbm.h -+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h -@@ -310,36 +310,37 @@ typedef struct - #define CACHE_TYPE_ENTRY 0 - #define CACHE_TYPE_DN 1 - --struct backcommon --{ -- int ep_type; /* to distinguish backdn from backentry */ -- struct backcommon *ep_lrunext; /* for the cache */ -- struct backcommon *ep_lruprev; /* for the cache */ -- ID ep_id; /* entry id */ -- char ep_state; /* state in the cache */ --#define ENTRY_STATE_DELETED 0x1 /* entry is marked as deleted */ --#define 
ENTRY_STATE_CREATING 0x2 /* entry is being created; don't touch it */ --#define ENTRY_STATE_NOTINCACHE 0x4 /* cache_add failed; not in the cache */ -- int ep_refcnt; /* entry reference cnt */ -- size_t ep_size; /* for cache tracking */ -+struct backcommon { -+ int ep_type; /* to distinguish backdn from backentry */ -+ struct backcommon *ep_lrunext; /* for the cache */ -+ struct backcommon *ep_lruprev; /* for the cache */ -+ ID ep_id; /* entry id */ -+ char ep_state; /* state in the cache */ -+#define ENTRY_STATE_DELETED 0x1 /* entry is marked as deleted */ -+#define ENTRY_STATE_CREATING 0x2 /* entry is being created; don't touch it */ -+#define ENTRY_STATE_NOTINCACHE 0x4 /* cache_add failed; not in the cache */ -+#define ENTRY_STATE_INVALID 0x8 /* cache entry is invalid and needs to be removed */ -+ int ep_refcnt; /* entry reference cnt */ -+ size_t ep_size; /* for cache tracking */ -+ struct timespec ep_create_time; /* the time the entry was added to the cache */ - }; - - /* From ep_type through ep_size MUST be identical to backcommon */ --struct backentry --{ -- int ep_type; /* to distinguish backdn from backentry */ -- struct backcommon *ep_lrunext; /* for the cache */ -- struct backcommon *ep_lruprev; /* for the cache */ -- ID ep_id; /* entry id */ -- char ep_state; /* state in the cache */ -- int ep_refcnt; /* entry reference cnt */ -- size_t ep_size; /* for cache tracking */ -- Slapi_Entry *ep_entry; /* real entry */ -- Slapi_Entry *ep_vlventry; -- void *ep_dn_link; /* linkage for the 3 hash */ -- void *ep_id_link; /* tables used for */ -- void *ep_uuid_link; /* looking up entries */ -- PRMonitor *ep_mutexp; /* protection for mods; make it reentrant */ -+struct backentry { -+ int ep_type; /* to distinguish backdn from backentry */ -+ struct backcommon *ep_lrunext; /* for the cache */ -+ struct backcommon *ep_lruprev; /* for the cache */ -+ ID ep_id; /* entry id */ -+ char ep_state; /* state in the cache */ -+ int ep_refcnt; /* entry reference cnt */ -+ size_t 
ep_size; /* for cache tracking */ -+ struct timespec ep_create_time; /* the time the entry was added to the cache */ -+ Slapi_Entry *ep_entry; /* real entry */ -+ Slapi_Entry *ep_vlventry; -+ void * ep_dn_link; /* linkage for the 3 hash */ -+ void * ep_id_link; /* tables used for */ -+ void * ep_uuid_link; /* looking up entries */ -+ PRMonitor *ep_mutexp; /* protection for mods; make it reentrant */ - }; - - /* From ep_type through ep_size MUST be identical to backcommon */ -@@ -348,12 +349,13 @@ struct backdn - int ep_type; /* to distinguish backdn from backentry */ - struct backcommon *ep_lrunext; /* for the cache */ - struct backcommon *ep_lruprev; /* for the cache */ -- ID ep_id; /* entry id */ -- char ep_state; /* state in the cache; share ENTRY_STATE_* */ -- int ep_refcnt; /* entry reference cnt */ -- size_t ep_size; /* for cache tracking */ -- Slapi_DN *dn_sdn; -- void *dn_id_link; /* for hash table */ -+ ID ep_id; /* entry id */ -+ char ep_state; /* state in the cache; share ENTRY_STATE_* */ -+ int ep_refcnt; /* entry reference cnt */ -+ size_t ep_size; /* for cache tracking */ -+ struct timespec ep_create_time; /* the time the entry was added to the cache */ -+ Slapi_DN *dn_sdn; -+ void *dn_id_link; /* for hash table */ - }; - - /* for the in-core cache of entries */ -diff --git a/ldap/servers/slapd/back-ldbm/backentry.c b/ldap/servers/slapd/back-ldbm/backentry.c -index f2fe780db..a1f3ca1bb 100644 ---- a/ldap/servers/slapd/back-ldbm/backentry.c -+++ b/ldap/servers/slapd/back-ldbm/backentry.c -@@ -23,7 +23,7 @@ backentry_free(struct backentry **bep) - return; - } - ep = *bep; -- PR_ASSERT(ep->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)); -+ PR_ASSERT(ep->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE | ENTRY_STATE_INVALID)); - if (ep->ep_entry != NULL) { - slapi_entry_free(ep->ep_entry); - } -diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c -index 86e1f7b39..054766df2 100644 ---- 
a/ldap/servers/slapd/back-ldbm/cache.c -+++ b/ldap/servers/slapd/back-ldbm/cache.c -@@ -56,6 +56,11 @@ - #define LOG(...) - #endif - -+typedef enum { -+ ENTRY_CACHE, -+ DN_CACHE, -+} CacheType; -+ - #define LRU_DETACH(cache, e) lru_detach((cache), (void *)(e)) - - #define CACHE_LRU_HEAD(cache, type) ((type)((cache)->c_lruhead)) -@@ -185,6 +190,7 @@ new_hash(u_long size, u_long offset, HashFn hfn, HashTestFn tfn) - int - add_hash(Hashtable *ht, void *key, uint32_t keylen, void *entry, void **alt) - { -+ struct backcommon *back_entry = (struct backcommon *)entry; - u_long val, slot; - void *e; - -@@ -202,6 +208,7 @@ add_hash(Hashtable *ht, void *key, uint32_t keylen, void *entry, void **alt) - e = HASH_NEXT(ht, e); - } - /* ok, it's not already there, so add it */ -+ back_entry->ep_create_time = slapi_current_rel_time_hr(); - HASH_NEXT(ht, entry) = ht->slot[slot]; - ht->slot[slot] = entry; - return 1; -@@ -492,6 +499,126 @@ cache_make_hashes(struct cache *cache, int type) - } - } - -+/* -+ * Helper function for flush_hash() to calculate if the entry should be -+ * removed from the cache. -+ */ -+static int32_t -+flush_remove_entry(struct timespec *entry_time, struct timespec *start_time) -+{ -+ struct timespec diff; -+ -+ slapi_timespec_diff(entry_time, start_time, &diff); -+ if (diff.tv_sec >= 0) { -+ return 1; -+ } else { -+ return 0; -+ } -+} -+ -+/* -+ * Flush all the cache entries that were added after the "start time" -+ * This is called when a backend transaction plugin fails, and we need -+ * to remove all the possible invalid entries in the cache. -+ * -+ * If the ref count is 0, we can straight up remove it from the cache, but -+ * if the ref count is greater than 1, then the entry is currently in use. -+ * In the later case we set the entry state to ENTRY_STATE_INVALID, and -+ * when the owning thread cache_returns() the cache entry is automatically -+ * removed so another thread can not use/lock the invalid cache entry. 
-+ */ -+static void -+flush_hash(struct cache *cache, struct timespec *start_time, int32_t type) -+{ -+ Hashtable *ht = cache->c_idtable; /* start with the ID table as it's in both ENTRY and DN caches */ -+ void *e, *laste = NULL; -+ -+ cache_lock(cache); -+ -+ for (size_t i = 0; i < ht->size; i++) { -+ e = ht->slot[i]; -+ while (e) { -+ struct backcommon *entry = (struct backcommon *)e; -+ uint64_t remove_it = 0; -+ if (flush_remove_entry(&entry->ep_create_time, start_time)) { -+ /* Mark the entry to be removed */ -+ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", "[%s] Removing entry id (%d)\n", -+ type ? "DN CACHE" : "ENTRY CACHE", entry->ep_id); -+ remove_it = 1; -+ } -+ laste = e; -+ e = HASH_NEXT(ht, e); -+ -+ if (remove_it) { -+ /* since we have the cache lock we know we can trust refcnt */ -+ entry->ep_state |= ENTRY_STATE_INVALID; -+ if (entry->ep_refcnt == 0) { -+ entry->ep_refcnt++; -+ lru_delete(cache, laste); -+ if (type == ENTRY_CACHE) { -+ entrycache_remove_int(cache, laste); -+ entrycache_return(cache, (struct backentry **)&laste); -+ } else { -+ dncache_remove_int(cache, laste); -+ dncache_return(cache, (struct backdn **)&laste); -+ } -+ } else { -+ /* Entry flagged for removal */ -+ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", -+ "[%s] Flagging entry to be removed later: id (%d) refcnt: %d\n", -+ type ? 
"DN CACHE" : "ENTRY CACHE", entry->ep_id, entry->ep_refcnt); -+ } -+ } -+ } -+ } -+ -+ if (type == ENTRY_CACHE) { -+ /* Also check the DN hashtable */ -+ ht = cache->c_dntable; -+ -+ for (size_t i = 0; i < ht->size; i++) { -+ e = ht->slot[i]; -+ while (e) { -+ struct backcommon *entry = (struct backcommon *)e; -+ uint64_t remove_it = 0; -+ if (flush_remove_entry(&entry->ep_create_time, start_time)) { -+ /* Mark the entry to be removed */ -+ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", "[ENTRY CACHE] Removing entry id (%d)\n", -+ entry->ep_id); -+ remove_it = 1; -+ } -+ laste = e; -+ e = HASH_NEXT(ht, e); -+ -+ if (remove_it) { -+ /* since we have the cache lock we know we can trust refcnt */ -+ entry->ep_state |= ENTRY_STATE_INVALID; -+ if (entry->ep_refcnt == 0) { -+ entry->ep_refcnt++; -+ lru_delete(cache, laste); -+ entrycache_remove_int(cache, laste); -+ entrycache_return(cache, (struct backentry **)&laste); -+ } else { -+ /* Entry flagged for removal */ -+ slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", -+ "[ENTRY CACHE] Flagging entry to be removed later: id (%d) refcnt: %d\n", -+ entry->ep_id, entry->ep_refcnt); -+ } -+ } -+ } -+ } -+ } -+ -+ cache_unlock(cache); -+} -+ -+void -+revert_cache(ldbm_instance *inst, struct timespec *start_time) -+{ -+ flush_hash(&inst->inst_cache, start_time, ENTRY_CACHE); -+ flush_hash(&inst->inst_dncache, start_time, DN_CACHE); -+} -+ - /* initialize the cache */ - int - cache_init(struct cache *cache, uint64_t maxsize, long maxentries, int type) -@@ -1141,10 +1268,10 @@ entrycache_return(struct cache *cache, struct backentry **bep) - backentry_free(bep); - } else { - ASSERT(e->ep_refcnt > 0); -- if (!--e->ep_refcnt) { -- if (e->ep_state & ENTRY_STATE_DELETED) { -- const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(e)); -- if (ndn) { -+ if (! 
--e->ep_refcnt) { -+ if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_INVALID)) { -+ const char* ndn = slapi_sdn_get_ndn(backentry_get_sdn(e)); -+ if (ndn){ - /* - * State is "deleted" and there are no more references, - * so we need to remove the entry from the DN cache because -@@ -1154,6 +1281,13 @@ entrycache_return(struct cache *cache, struct backentry **bep) - LOG("entrycache_return -Failed to remove %s from dn table\n", ndn); - } - } -+ if (e->ep_state & ENTRY_STATE_INVALID) { -+ /* Remove it from the hash table before we free the back entry */ -+ slapi_log_err(SLAPI_LOG_CACHE, "entrycache_return", -+ "Finally flushing invalid entry: %d (%s)\n", -+ e->ep_id, backentry_get_ndn(e)); -+ entrycache_remove_int(cache, e); -+ } - backentry_free(bep); - } else { - lru_add(cache, e); -@@ -1535,11 +1669,11 @@ cache_lock_entry(struct cache *cache, struct backentry *e) - - /* make sure entry hasn't been deleted now */ - cache_lock(cache); -- if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)) { -- cache_unlock(cache); -- PR_ExitMonitor(e->ep_mutexp); -- LOG("<= cache_lock_entry (DELETED)\n"); -- return RETRY_CACHE_LOCK; -+ if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE | ENTRY_STATE_INVALID)) { -+ cache_unlock(cache); -+ PR_ExitMonitor(e->ep_mutexp); -+ LOG("<= cache_lock_entry (DELETED)\n"); -+ return RETRY_CACHE_LOCK; - } - cache_unlock(cache); - -@@ -1695,8 +1829,15 @@ dncache_return(struct cache *cache, struct backdn **bdn) - backdn_free(bdn); - } else { - ASSERT((*bdn)->ep_refcnt > 0); -- if (!--(*bdn)->ep_refcnt) { -- if ((*bdn)->ep_state & ENTRY_STATE_DELETED) { -+ if (! 
--(*bdn)->ep_refcnt) { -+ if ((*bdn)->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_INVALID)) { -+ if ((*bdn)->ep_state & ENTRY_STATE_INVALID) { -+ /* Remove it from the hash table before we free the back dn */ -+ slapi_log_err(SLAPI_LOG_CACHE, "dncache_return", -+ "Finally flushing invalid entry: %d (%s)\n", -+ (*bdn)->ep_id, slapi_sdn_get_dn((*bdn)->dn_sdn)); -+ dncache_remove_int(cache, (*bdn)); -+ } - backdn_free(bdn); - } else { - lru_add(cache, (void *)*bdn); -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c -index 32c8e71ff..d3c8cdab2 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_add.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c -@@ -97,6 +97,8 @@ ldbm_back_add(Slapi_PBlock *pb) - PRUint64 conn_id; - int op_id; - int result_sent = 0; -+ int32_t parent_op = 0; -+ struct timespec parent_time; - - if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { - conn_id = 0; /* connection is NULL */ -@@ -147,6 +149,13 @@ ldbm_back_add(Slapi_PBlock *pb) - slapi_entry_delete_values(e, numsubordinates, NULL); - - dblayer_txn_init(li, &txn); -+ -+ if (txn.back_txn_txn == NULL) { -+ /* This is the parent operation, get the time */ -+ parent_op = 1; -+ parent_time = slapi_current_rel_time_hr(); -+ } -+ - /* the calls to perform searches require the parent txn if any - so set txn to the parent_txn until we begin the child transaction */ - if (parent_txn) { -@@ -1239,6 +1248,10 @@ ldbm_back_add(Slapi_PBlock *pb) - goto common_return; - - error_return: -+ if (parent_op) { -+ revert_cache(inst, &parent_time); -+ } -+ - if (addingentry_id_assigned) { - next_id_return(be, addingentry->ep_id); - } -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c -index f5f6c1e3a..80c53a3e0 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c -@@ -79,6 +79,8 @@ ldbm_back_delete(Slapi_PBlock *pb) - ID tomb_ep_id = 0; - int result_sent = 0; - 
Connection *pb_conn; -+ int32_t parent_op = 0; -+ struct timespec parent_time; - - if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { - conn_id = 0; /* connection is NULL */ -@@ -98,6 +100,13 @@ ldbm_back_delete(Slapi_PBlock *pb) - - /* dblayer_txn_init needs to be called before "goto error_return" */ - dblayer_txn_init(li, &txn); -+ -+ if (txn.back_txn_txn == NULL) { -+ /* This is the parent operation, get the time */ -+ parent_op = 1; -+ parent_time = slapi_current_rel_time_hr(); -+ } -+ - /* the calls to perform searches require the parent txn if any - so set txn to the parent_txn until we begin the child transaction */ - if (parent_txn) { -@@ -1356,6 +1365,9 @@ commit_return: - goto common_return; - - error_return: -+ if (parent_op) { -+ revert_cache(inst, &parent_time); -+ } - if (tombstone) { - if (cache_is_in_cache(&inst->inst_cache, tombstone)) { - tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */ -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -index cc4319e5f..93ab0a9e8 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c -@@ -412,6 +412,8 @@ ldbm_back_modify(Slapi_PBlock *pb) - int fixup_tombstone = 0; - int ec_locked = 0; - int result_sent = 0; -+ int32_t parent_op = 0; -+ struct timespec parent_time; - - slapi_pblock_get(pb, SLAPI_BACKEND, &be); - slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); -@@ -424,6 +426,13 @@ ldbm_back_modify(Slapi_PBlock *pb) - fixup_tombstone = operation_is_flag_set(operation, OP_FLAG_TOMBSTONE_FIXUP); - - dblayer_txn_init(li, &txn); /* must do this before first goto error_return */ -+ -+ if (txn.back_txn_txn == NULL) { -+ /* This is the parent operation, get the time */ -+ parent_op = 1; -+ parent_time = slapi_current_rel_time_hr(); -+ } -+ - /* the calls to perform searches require the parent txn if any - so set txn to the parent_txn until we begin the child transaction */ - if 
(parent_txn) { -@@ -887,6 +896,9 @@ ldbm_back_modify(Slapi_PBlock *pb) - goto common_return; - - error_return: -+ if (parent_op) { -+ revert_cache(inst, &parent_time); -+ } - if (postentry != NULL) { - slapi_entry_free(postentry); - postentry = NULL; -diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -index e2e9d1b46..1ca1bdb28 100644 ---- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c -@@ -97,6 +97,8 @@ ldbm_back_modrdn(Slapi_PBlock *pb) - int op_id; - int result_sent = 0; - Connection *pb_conn = NULL; -+ int32_t parent_op = 0; -+ struct timespec parent_time; - - if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { - conn_id = 0; /* connection is NULL */ -@@ -134,6 +136,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb) - - /* dblayer_txn_init needs to be called before "goto error_return" */ - dblayer_txn_init(li, &txn); -+ -+ if (txn.back_txn_txn == NULL) { -+ /* This is the parent operation, get the time */ -+ parent_op = 1; -+ parent_time = slapi_current_rel_time_hr(); -+ } -+ - /* the calls to perform searches require the parent txn if any - so set txn to the parent_txn until we begin the child transaction */ - if (parent_txn) { -@@ -1276,6 +1285,10 @@ ldbm_back_modrdn(Slapi_PBlock *pb) - goto common_return; - - error_return: -+ /* Revert the caches if this is the parent operation */ -+ if (parent_op) { -+ revert_cache(inst, &parent_time); -+ } - /* result already sent above - just free stuff */ - if (postentry) { - slapi_entry_free(postentry); -@@ -1353,6 +1366,10 @@ error_return: - slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? 
&ldap_result_code : &retval); - } - slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); -+ /* Revert the caches if this is the parent operation */ -+ if (parent_op) { -+ revert_cache(inst, &parent_time); -+ } - } - retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); - -@@ -1413,12 +1430,7 @@ common_return: - CACHE_RETURN(&inst->inst_dncache, &bdn); - } - -- /* remove the new entry from the cache if the op failed - -- otherwise, leave it in */ - if (ec && inst) { -- if (retval && cache_is_in_cache(&inst->inst_cache, ec)) { -- CACHE_REMOVE(&inst->inst_cache, ec); -- } - CACHE_RETURN(&inst->inst_cache, &ec); - } - ec = NULL; -diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -index 61c3313c5..510d38f57 100644 ---- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h -@@ -55,6 +55,7 @@ void cache_unlock_entry(struct cache *cache, struct backentry *e); - int cache_replace(struct cache *cache, void *oldptr, void *newptr); - int cache_has_otherref(struct cache *cache, void *bep); - int cache_is_in_cache(struct cache *cache, void *ptr); -+void revert_cache(ldbm_instance *inst, struct timespec *start_time); - - #ifdef CACHE_DEBUG - void check_entry_cache(struct cache *cache, struct backentry *e); -diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h -index bdad4e59e..eefe88724 100644 ---- a/ldap/servers/slapd/slapi-plugin.h -+++ b/ldap/servers/slapd/slapi-plugin.h -@@ -6853,6 +6853,12 @@ void slapi_operation_time_expiry(Slapi_Operation *o, time_t timeout, struct time - */ - slapi_timer_result slapi_timespec_expire_check(struct timespec *expire); - -+/** -+ * Returns the current system time as a hr clock -+ * -+ * \return timespec of the current monotonic time. 
-+ */ -+struct timespec slapi_current_rel_time_hr(void); - - /* - * Plugin and parameter block related macros (remainder of this file). -@@ -8296,6 +8302,15 @@ uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder); - - /* helper function */ - const char * fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val); -+/** -+ * Diffs two timespects a - b into *diff. This is useful with -+ * clock_monotonic to find time taken to perform operations. -+ * -+ * \param struct timespec a the "end" time. -+ * \param struct timespec b the "start" time. -+ * \param struct timespec c the difference. -+ */ -+void slapi_timespec_diff(struct timespec *a, struct timespec *b, struct timespec *diff); - - #ifdef __cplusplus - } -diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c -index 584bd1e63..2a3865858 100644 ---- a/ldap/servers/slapd/time.c -+++ b/ldap/servers/slapd/time.c -@@ -96,6 +96,32 @@ slapi_current_utc_time_hr(void) - return ltnow; - } - -+struct timespec -+slapi_current_rel_time_hr(void) -+{ -+ struct timespec now; -+ clock_gettime(CLOCK_MONOTONIC, &now); -+ return now; -+} -+ -+void -+slapi_timespec_diff(struct timespec *a, struct timespec *b, struct timespec *diff) -+{ -+ /* Now diff the two */ -+ time_t sec = a->tv_sec - b->tv_sec; -+ int32_t nsec = a->tv_nsec - b->tv_nsec; -+ -+ if (nsec < 0) { -+ /* It's negative so take one second */ -+ sec -= 1; -+ /* And set nsec to to a whole value */ -+ nsec = 1000000000 - nsec; -+ } -+ -+ diff->tv_sec = sec; -+ diff->tv_nsec = nsec; -+} -+ - time_t - slapi_current_utc_time(void) - { --- -2.21.0 - diff --git a/SOURCES/0027-Ticket-50393-maxlogsperdir-accepting-negative-values.patch b/SOURCES/0027-Ticket-50393-maxlogsperdir-accepting-negative-values.patch new file mode 100644 index 0000000..1da3a1a --- /dev/null +++ b/SOURCES/0027-Ticket-50393-maxlogsperdir-accepting-negative-values.patch @@ -0,0 +1,428 @@ +From 03695c416f7f8311afbded390f3c0ff3637a10d4 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds 
+Date: Mon, 20 May 2019 11:38:05 -0400 +Subject: [PATCH] Ticket 50393 - maxlogsperdir accepting negative values + +Description: Improve the log "digit" config setting validation + for all settings. + +https://pagure.io/389-ds-base/issue/50393 + +Reviewed by: tbordaz, firstyear, mhonek, and spichugi (Thanks!!!!) + +(cherry picked from commit ca70d06fbb7a2c06c62f0ba5b192dba36f24b8e3) +--- + dirsrvtests/tests/suites/logging/__init__.py | 3 + + .../suites/logging/logging_config_test.py | 86 +++++++++++ + ldap/servers/slapd/log.c | 143 +++++++++++++----- + 3 files changed, 192 insertions(+), 40 deletions(-) + create mode 100644 dirsrvtests/tests/suites/logging/__init__.py + create mode 100644 dirsrvtests/tests/suites/logging/logging_config_test.py + +diff --git a/dirsrvtests/tests/suites/logging/__init__.py b/dirsrvtests/tests/suites/logging/__init__.py +new file mode 100644 +index 000000000..7f812e357 +--- /dev/null ++++ b/dirsrvtests/tests/suites/logging/__init__.py +@@ -0,0 +1,3 @@ ++""" ++ :Requirement: 389-ds-base: Directory Server Logging Configurations ++""" +diff --git a/dirsrvtests/tests/suites/logging/logging_config_test.py b/dirsrvtests/tests/suites/logging/logging_config_test.py +new file mode 100644 +index 000000000..4d8d68ab5 +--- /dev/null ++++ b/dirsrvtests/tests/suites/logging/logging_config_test.py +@@ -0,0 +1,86 @@ ++import logging ++import pytest ++import os ++import ldap ++from lib389._constants import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++big_value = "1111111111111111111111111111111111111111111" ++ ++ ++@pytest.mark.parametrize("attr, invalid_vals, valid_vals", ++ [ ++ ("logexpirationtime", ["-2", "0"], ["1", "-1"]), ++ ("maxlogsize", ["-2", "0"], ["100", "-1"]), ++ ("logmaxdiskspace", ["-2", "0"], ["100", 
"-1"]), ++ ("logminfreediskspace", ["-2", "0"], ["100", "-1"]), ++ ("mode", ["888", "778", "77", "7777"], ["777", "000", "600"]), ++ ("maxlogsperdir", ["-1", "0"], ["1", "20"]), ++ ("logrotationsynchour", ["-1", "24"], ["0", "23"]), ++ ("logrotationsyncmin", ["-1", "60"], ["0", "59"]), ++ ("logrotationtime", ["-2", "0"], ["100", "-1"]) ++ ]) ++def test_logging_digit_config(topo, attr, invalid_vals, valid_vals): ++ """Validate logging config settings ++ ++ :id: a0ef30e5-538b-46fa-9762-01a4435a15e9 ++ :setup: Standalone Instance ++ :steps: ++ 1. Test log expiration time ++ 2. Test log max size ++ 3. Test log max disk space ++ 4. Test log min disk space ++ 5. Test log mode ++ 6. Test log max number of logs ++ 7. Test log rotation hour ++ 8. Test log rotation minute ++ 9. Test log rotation time ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. Success ++ 4. Success ++ 5. Success ++ 6. Success ++ 7. Success ++ 8. Success ++ 9. Success ++ """ ++ ++ accesslog_attr = "nsslapd-accesslog-{}".format(attr) ++ auditlog_attr = "nsslapd-auditlog-{}".format(attr) ++ auditfaillog_attr = "nsslapd-auditfaillog-{}".format(attr) ++ errorlog_attr = "nsslapd-errorlog-{}".format(attr) ++ ++ # Test each log ++ for attr in [accesslog_attr, auditlog_attr, auditfaillog_attr, errorlog_attr]: ++ # Invalid values ++ for invalid_val in invalid_vals: ++ with pytest.raises(ldap.LDAPError): ++ topo.standalone.config.set(attr, invalid_val) ++ ++ # Invalid high value ++ with pytest.raises(ldap.LDAPError): ++ topo.standalone.config.set(attr, big_value) ++ ++ # Non digits ++ with pytest.raises(ldap.LDAPError): ++ topo.standalone.config.set(attr, "abc") ++ ++ # Valid values ++ for valid_val in valid_vals: ++ topo.standalone.config.set(attr, valid_val) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) +diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c +index 7dd71541b..2456abf1e 
100644 +--- a/ldap/servers/slapd/log.c ++++ b/ldap/servers/slapd/log.c +@@ -817,8 +817,9 @@ log_update_auditfaillogdir(char *pathname, int apply) + int + log_set_mode(const char *attrname, char *value, int logtype, char *errorbuf, int apply) + { +- int v = 0; ++ int64_t v = 0; + int retval = LDAP_SUCCESS; ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (NULL == value) { +@@ -833,7 +834,18 @@ log_set_mode(const char *attrname, char *value, int logtype, char *errorbuf, int + return LDAP_SUCCESS; + } + +- v = strtol(value, NULL, 8); ++ errno = 0; ++ v = strtol(value, &endp, 8); ++ if (*endp != '\0' || errno == ERANGE || ++ strlen(value) != 3 || ++ v > 0777 /* octet of 777 511 */ || ++ v < 0) ++ { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s) (%ld), value must be three digits between 000 and 777", ++ value, attrname, v); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + switch (logtype) { + case SLAPD_ACCESS_LOG: +@@ -895,9 +907,9 @@ int + log_set_numlogsperdir(const char *attrname, char *numlogs_str, int logtype, char *returntext, int apply) + { + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); +- ++ char *endp = NULL; + int rv = LDAP_SUCCESS; +- int numlogs; ++ int64_t numlogs; + + if (logtype != SLAPD_ACCESS_LOG && + logtype != SLAPD_ERROR_LOG && +@@ -911,7 +923,14 @@ log_set_numlogsperdir(const char *attrname, char *numlogs_str, int logtype, char + return rv; + } + +- numlogs = atoi(numlogs_str); ++ errno = 0; ++ numlogs = strtol(numlogs_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || numlogs < 1) { ++ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be between 1 and 2147483647", ++ numlogs_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + if (numlogs >= 1) { + switch (logtype) { +@@ -960,21 +979,25 @@ int + log_set_logsize(const char *attrname, char *logsize_str, int logtype, char 
*returntext, int apply) + { + int rv = LDAP_SUCCESS; +- PRInt64 max_logsize; /* in bytes */ +- int logsize; /* in megabytes */ ++ int64_t max_logsize; /* in bytes */ ++ int64_t logsize; /* in megabytes */ ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (!apply || !logsize_str || !*logsize_str) + return rv; + +- logsize = atoi(logsize_str); ++ errno = 0; ++ logsize = strtol(logsize_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || logsize < -1 || logsize == 0) { ++ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be \"-1\" or greater than 0", ++ logsize_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + /* convert it to bytes */ +- max_logsize = (PRInt64)logsize * LOG_MB_IN_BYTES; +- +- if (max_logsize <= 0) { +- max_logsize = -1; +- } ++ max_logsize = logsize * LOG_MB_IN_BYTES; + + switch (logtype) { + case SLAPD_ACCESS_LOG: +@@ -1101,8 +1124,9 @@ log_set_rotationsync_enabled(const char *attrname, char *value, int logtype, cha + int + log_set_rotationsynchour(const char *attrname, char *rhour_str, int logtype, char *returntext, int apply) + { +- int rhour = -1; ++ int64_t rhour = -1; + int rv = LDAP_SUCCESS; ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (logtype != SLAPD_ACCESS_LOG && +@@ -1115,12 +1139,19 @@ log_set_rotationsynchour(const char *attrname, char *rhour_str, int logtype, cha + } + + /* return if we aren't doing this for real */ +- if (!apply) { ++ if (!apply || !rhour_str || !*rhour_str) { + return rv; + } + +- if (rhour_str && *rhour_str != '\0') +- rhour = atol(rhour_str); ++ errno = 0; ++ rhour = strtol(rhour_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || rhour < 0 || rhour > 23) { ++ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be \"0\" thru \"23\"", ++ rhour_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } ++ + if 
(rhour > 23) + rhour = rhour % 24; + +@@ -1161,8 +1192,9 @@ log_set_rotationsynchour(const char *attrname, char *rhour_str, int logtype, cha + int + log_set_rotationsyncmin(const char *attrname, char *rmin_str, int logtype, char *returntext, int apply) + { +- int rmin = -1; ++ int64_t rmin = -1; + int rv = LDAP_SUCCESS; ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (logtype != SLAPD_ACCESS_LOG && +@@ -1175,14 +1207,18 @@ log_set_rotationsyncmin(const char *attrname, char *rmin_str, int logtype, char + } + + /* return if we aren't doing this for real */ +- if (!apply) { ++ if (!apply || !rmin_str || !*rmin_str) { + return rv; + } + +- if (rmin_str && *rmin_str != '\0') +- rmin = atol(rmin_str); +- if (rmin > 59) +- rmin = rmin % 60; ++ errno = 0; ++ rmin = strtol(rmin_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || rmin < 0 || rmin > 59) { ++ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be between \"0\" and \"59\"", ++ rmin_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + switch (logtype) { + case SLAPD_ACCESS_LOG: +@@ -1229,8 +1265,9 @@ log_set_rotationtime(const char *attrname, char *rtime_str, int logtype, char *r + { + + int runit = 0; +- int value, rtime; ++ int64_t value, rtime; + int rv = LDAP_SUCCESS; ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (logtype != SLAPD_ACCESS_LOG && +@@ -1247,7 +1284,14 @@ log_set_rotationtime(const char *attrname, char *rtime_str, int logtype, char *r + return rv; + } + +- rtime = atoi(rtime_str); ++ errno = 0; ++ rtime = strtol(rtime_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || rtime < -1 || rtime == 0) { ++ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be \"-1\" or greater than \"0\"", ++ rtime_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + if (0 == rtime) { + rtime = -1; /* 
Value Range: -1 | 1 to PR_INT32_MAX */ +@@ -1332,7 +1376,6 @@ log_set_rotationtimeunit(const char *attrname, char *runit, int logtype, char *e + int origvalue = 0, value = 0; + int runitType; + int rv = 0; +- + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (logtype != SLAPD_ACCESS_LOG && +@@ -1448,10 +1491,10 @@ int + log_set_maxdiskspace(const char *attrname, char *maxdiskspace_str, int logtype, char *errorbuf, int apply) + { + int rv = 0; +- PRInt64 mlogsize = 0; /* in bytes */ +- PRInt64 maxdiskspace; /* in bytes */ +- int s_maxdiskspace; /* in megabytes */ +- ++ int64_t mlogsize = 0; /* in bytes */ ++ int64_t maxdiskspace; /* in bytes */ ++ int64_t s_maxdiskspace; /* in megabytes */ ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (logtype != SLAPD_ACCESS_LOG && +@@ -1465,7 +1508,14 @@ log_set_maxdiskspace(const char *attrname, char *maxdiskspace_str, int logtype, + if (!apply || !maxdiskspace_str || !*maxdiskspace_str) + return rv; + +- s_maxdiskspace = atoi(maxdiskspace_str); ++ errno = 0; ++ s_maxdiskspace = strtol(maxdiskspace_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || s_maxdiskspace < -1 || s_maxdiskspace == 0) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be \"-1\" or greater than 0", ++ maxdiskspace_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + /* Disk space are in MB but store in bytes */ + switch (logtype) { +@@ -1538,9 +1588,9 @@ int + log_set_mindiskspace(const char *attrname, char *minfreespace_str, int logtype, char *errorbuf, int apply) + { + int rv = LDAP_SUCCESS; +- int minfreespace; /* in megabytes */ +- PRInt64 minfreespaceB; /* in bytes */ +- ++ int64_t minfreespace; /* in megabytes */ ++ int64_t minfreespaceB; /* in bytes */ ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (logtype != SLAPD_ACCESS_LOG && +@@ -1556,11 +1606,18 @@ 
log_set_mindiskspace(const char *attrname, char *minfreespace_str, int logtype, + return rv; + } + +- minfreespace = atoi(minfreespace_str); ++ errno = 0; ++ minfreespace = strtol(minfreespace_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || minfreespace < -1 || minfreespace == 0) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be \"-1\" or greater than 0", ++ minfreespace_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + /* Disk space are in MB but store in bytes */ + if (minfreespace >= 1) { +- minfreespaceB = (PRInt64)minfreespace * LOG_MB_IN_BYTES; ++ minfreespaceB = minfreespace * LOG_MB_IN_BYTES; + switch (logtype) { + case SLAPD_ACCESS_LOG: + LOG_ACCESS_LOCK_WRITE(); +@@ -1602,10 +1659,10 @@ log_set_mindiskspace(const char *attrname, char *minfreespace_str, int logtype, + int + log_set_expirationtime(const char *attrname, char *exptime_str, int logtype, char *errorbuf, int apply) + { +- +- int eunit, value, exptime; ++ int64_t eunit, value, exptime; + int rsec = 0; + int rv = 0; ++ char *endp = NULL; + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if (logtype != SLAPD_ACCESS_LOG && +@@ -1621,7 +1678,14 @@ log_set_expirationtime(const char *attrname, char *exptime_str, int logtype, cha + return rv; + } + +- exptime = atoi(exptime_str); /* <= 0: no exptime */ ++ errno = 0; ++ exptime = strtol(exptime_str, &endp, 10); ++ if (*endp != '\0' || errno == ERANGE || exptime < -1 || exptime == 0) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Invalid value \"%s\" for attribute (%s), value must be \"-1\" or greater than 0", ++ exptime_str, attrname); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + + switch (logtype) { + case SLAPD_ACCESS_LOG: +@@ -1734,7 +1798,6 @@ log_set_expirationtimeunit(const char *attrname, char *expunit, int logtype, cha + } else { + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid time unit \"%s\"", 
attrname, expunit); + rv = LDAP_OPERATIONS_ERROR; +- ; + } + + /* return if we aren't doing this for real */ +-- +2.17.2 + diff --git a/SOURCES/0028-BZ1518320-entry-cache-crash-fix.patch b/SOURCES/0028-BZ1518320-entry-cache-crash-fix.patch deleted file mode 100644 index f780d25..0000000 --- a/SOURCES/0028-BZ1518320-entry-cache-crash-fix.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 12672d5bad1b1bbb71a281d33533b1829ab65fbc Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 21 Jun 2019 10:15:19 -0400 -Subject: [PATCH 2/2] BZ1518320 - entry cache crash fix - -Description: Fix cherry-pick error ---- - ldap/servers/slapd/time.c | 26 -------------------------- - 1 file changed, 26 deletions(-) - -diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c -index 2a3865858..584bd1e63 100644 ---- a/ldap/servers/slapd/time.c -+++ b/ldap/servers/slapd/time.c -@@ -96,32 +96,6 @@ slapi_current_utc_time_hr(void) - return ltnow; - } - --struct timespec --slapi_current_rel_time_hr(void) --{ -- struct timespec now; -- clock_gettime(CLOCK_MONOTONIC, &now); -- return now; --} -- --void --slapi_timespec_diff(struct timespec *a, struct timespec *b, struct timespec *diff) --{ -- /* Now diff the two */ -- time_t sec = a->tv_sec - b->tv_sec; -- int32_t nsec = a->tv_nsec - b->tv_nsec; -- -- if (nsec < 0) { -- /* It's negative so take one second */ -- sec -= 1; -- /* And set nsec to to a whole value */ -- nsec = 1000000000 - nsec; -- } -- -- diff->tv_sec = sec; -- diff->tv_nsec = nsec; --} -- - time_t - slapi_current_utc_time(void) - { --- -2.21.0 - diff --git a/SOURCES/0028-Issue-49754-Instance-created-using-dscreate-can-t-be.patch b/SOURCES/0028-Issue-49754-Instance-created-using-dscreate-can-t-be.patch new file mode 100644 index 0000000..2576667 --- /dev/null +++ b/SOURCES/0028-Issue-49754-Instance-created-using-dscreate-can-t-be.patch @@ -0,0 +1,36 @@ +From 1e31c091f4a62d09fe8e2fbd1fda2d67ccbe8508 Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Thu, 23 May 2019 
15:38:16 +0200 +Subject: [PATCH] Issue 49754 - Instance created using dscreate can't be + upgraded + +Bug Description: +On 1.3.9 remove_ds_instance() is destructive and deletes +/usr/lib64/dirsrv together with perl modules and 389-ds-base libs. + +Fix Description: +Backport defaults.inf changes for inst_dir + +Relates: https://pagure.io/389-ds-base/issue/49754 + +Reviewed by: mreynolds (Thanks!) +--- + ldap/admin/src/defaults.inf.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in +index 8565ff7a0..eff88f238 100644 +--- a/ldap/admin/src/defaults.inf.in ++++ b/ldap/admin/src/defaults.inf.in +@@ -35,7 +35,7 @@ config_dir = @instconfigdir@/slapd-{instance_name} + local_state_dir = @localstatedir@ + run_dir = @localstatedir@/run/dirsrv + pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid +-inst_dir = @serverdir@ ++inst_dir = @serverdir@/slapd-{instance_name} + plugin_dir = @serverplugindir@ + system_schema_dir = @systemschemadir@ + +-- +2.17.2 + diff --git a/SOURCES/0029-BZ1518320-entry-cache-crash-fix-cherry-pick-error.patch b/SOURCES/0029-BZ1518320-entry-cache-crash-fix-cherry-pick-error.patch deleted file mode 100644 index 7ce05a4..0000000 --- a/SOURCES/0029-BZ1518320-entry-cache-crash-fix-cherry-pick-error.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 3edb83ca607bdf091cf46035b8e09e10b781f48a Mon Sep 17 00:00:00 2001 -From: Mark Reynolds -Date: Fri, 21 Jun 2019 11:40:57 -0400 -Subject: [PATCH] BZ1518320 - entry cache crash fix cherry-pick error - ---- - configure.ac | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/configure.ac b/configure.ac -index ea528ff2b..91d6d398b 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -72,6 +72,9 @@ AC_FUNC_STRFTIME - AC_FUNC_VPRINTF - AC_CHECK_FUNCS([endpwent ftruncate getcwd gethostbyname inet_ntoa localtime_r memmove memset mkdir munmap putenv rmdir setrlimit socket strcasecmp strchr strcspn strdup strerror strncasecmp 
strpbrk strrchr strstr strtol tzset]) - -+# These functions are *required* without option. -+AC_CHECK_FUNCS([clock_gettime], [], AC_MSG_ERROR([unable to locate required symbol clock_gettime])) -+ - # This will detect if we need to add the LIBADD_DL value for us. - LT_LIB_DLLOAD - --- -2.21.0 - diff --git a/SOURCES/0029-Ticket-50413-ds-replcheck-Always-display-the-Result-.patch b/SOURCES/0029-Ticket-50413-ds-replcheck-Always-display-the-Result-.patch new file mode 100644 index 0000000..094e1b3 --- /dev/null +++ b/SOURCES/0029-Ticket-50413-ds-replcheck-Always-display-the-Result-.patch @@ -0,0 +1,100 @@ +From 62173da703e1c0403f8102ad10a6ca9d0e8e7c82 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 30 May 2019 15:38:27 -0400 +Subject: [PATCH] Ticket 50413 - ds-replcheck - Always display the Result + Summary + +Description: Previously we only printed a "Result Summary" if there + were no inconsistencies and the entry counts matched. + However, the entry counts do not need to match. So + this made the "Result Summary" checks too strict, and + if things were out of sync there was no Result Summary + printed at all. This fix just always prints a result + summary and it removes the entry count check. + +https://pagure.io/389-ds-base/issue/50413 + +Reviewed by: ? 
+ +(cherry picked from commit 423a7ba01ed3bad52c8caa6a20267f2335b3c69f) +--- + ldap/admin/src/scripts/ds-replcheck | 29 +++++++++++++++++------------ + 1 file changed, 17 insertions(+), 12 deletions(-) + +diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck +index a9411cb4f..aea32fdb4 100755 +--- a/ldap/admin/src/scripts/ds-replcheck ++++ b/ldap/admin/src/scripts/ds-replcheck +@@ -581,14 +581,14 @@ def do_offline_report(opts, output_file=None): + MLDIF = open(opts['mldif'], "r") + except Exception as e: + print('Failed to open Master LDIF: ' + str(e)) +- return None ++ return + + try: + RLDIF = open(opts['rldif'], "r") + except Exception as e: + print('Failed to open Replica LDIF: ' + str(e)) + MLDIF.close() +- return None ++ return + + # Verify LDIF Files + try: +@@ -598,7 +598,7 @@ def do_offline_report(opts, output_file=None): + print('Master LDIF file in invalid, aborting...') + MLDIF.close() + RLDIF.close() +- return None ++ return + try: + print("Validating Replica ldif file ({})...".format(opts['rldif'])) + LDIFRecordList(RLDIF).parse() +@@ -606,7 +606,7 @@ def do_offline_report(opts, output_file=None): + print('Replica LDIF file is invalid, aborting...') + MLDIF.close() + RLDIF.close() +- return None ++ return + + # Get all the dn's, and entry counts + print ("Gathering all the DN's...") +@@ -758,10 +758,13 @@ def do_offline_report(opts, output_file=None): + final_report += ('=====================================================\n\n') + for diff in diff_report: + final_report += ('%s\n' % (diff)) +- if missing_report == "" and len(diff_report) == 0 and m_count == r_count: +- final_report += ('\nResult\n') +- final_report += ('=====================================================\n\n') +- final_report += ('No differences between Master and Replica\n') ++ ++ final_report += ('\nResult\n') ++ final_report += ('=====================================================\n\n') ++ if missing_report == "" and len(diff_report) == 0: ++ 
final_report += ('No replication differences between Master and Replica\n') ++ else: ++ final_report += ('There are replication differences between Master and Replica\n') + + if output_file: + output_file.write(final_report) +@@ -1009,10 +1012,12 @@ def print_online_report(report, opts, output_file): + for diff in report['diff']: + final_report += ('%s\n' % (diff)) + +- if not missing and len(report['diff']) == 0 and report['m_count'] == report['r_count']: +- final_report += ('\nResult\n') +- final_report += ('=====================================================\n\n') +- final_report += ('No differences between Master and Replica\n') ++ final_report += ('\nResult\n') ++ final_report += ('=====================================================\n\n') ++ if not missing and len(report['diff']) == 0: ++ final_report += ('No replication differences between Master and Replica\n') ++ else: ++ final_report += ('There are replication differences between Master and Replica\n') + + if output_file: + output_file.write(final_report) +-- +2.17.2 + diff --git a/SOURCES/0030-Ticket-50389-ns-slapd-craches-while-two-threads-are-.patch b/SOURCES/0030-Ticket-50389-ns-slapd-craches-while-two-threads-are-.patch new file mode 100644 index 0000000..db41887 --- /dev/null +++ b/SOURCES/0030-Ticket-50389-ns-slapd-craches-while-two-threads-are-.patch @@ -0,0 +1,82 @@ +From 6b7f87a557170164518d7c3b8e408304f2a9c1f4 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Fri, 17 May 2019 14:31:45 +0200 +Subject: [PATCH] Ticket 50389 - ns-slapd craches while two threads are polling + the same connection + +Bug Description: + nspr IO is not multi-threaded safe. + 389-ds should not be in a situation where several threads are polling + a same connection at the same time. + The scenario is a worker send back an operation result at the same time + another worker wants to read an incoming request. + +Fix Description: + The fix consist in synchonizing polling with c_pdumutex. 
+ + The thread that sends data (flush_ber) hold c_pdumutex. + + The thread that reads the data does a non blocking read. It then + enforce ioblocktimeout with iteration of poll. + The reading thread must hold c_pdumutex during poll to synchronize + with the reader thread. + The reading thread must poll with a small timeout + (CONN_TURBO_TIMEOUT_INTERVAL). In order to not block + the thread that send back data, the fix reduces the delay to 0.1s. + +https://pagure.io/389-ds-base/issue/50389 + +Reviewed by: Mark Reynolds, Matus Honek, William Brown + +Platforms tested: F28 + +Flag Day: no + +Doc impact: no + +(cherry picked from commit 2886ba77f664e4734a7ddfe4146f229caca49ce4) +--- + ldap/servers/slapd/connection.c | 5 ++++- + ldap/servers/slapd/daemon.c | 2 ++ + 2 files changed, 6 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index 188383b97..945602f20 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -932,7 +932,8 @@ connection_free_private_buffer(Connection *conn) + #define CONN_DONE 3 + #define CONN_TIMEDOUT 4 + +-#define CONN_TURBO_TIMEOUT_INTERVAL 1000 /* milliseconds */ ++#define CONN_TURBO_TIMEOUT_INTERVAL 100 /* milliseconds */ ++#define CONN_TURBO_TIMEOUT_MAXIMUM 5 /* attempts * interval IE 2000ms with 400 * 5 */ + #define CONN_TURBO_CHECK_INTERVAL 5 /* seconds */ + #define CONN_TURBO_PERCENTILE 50 /* proportion of threads allowed to be in turbo mode */ + #define CONN_TURBO_HYSTERESIS 0 /* avoid flip flopping in and out of turbo mode */ +@@ -1207,7 +1208,9 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * + pr_pd.fd = (PRFileDesc *)conn->c_prfd; + pr_pd.in_flags = PR_POLL_READ; + pr_pd.out_flags = 0; ++ PR_Lock(conn->c_pdumutex); + ret = PR_Poll(&pr_pd, 1, timeout); ++ PR_Unlock(conn->c_pdumutex); + waits_done++; + /* Did we time out ? 
*/ + if (0 == ret) { +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index c77e1f15c..4841a8a5c 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1943,6 +1943,8 @@ ns_handle_pr_read_ready(struct ns_job_t *job) + * or something goes seriously wrong. Otherwise, return 0. + * If -1 is returned, PR_GetError() explains why. + * Revision: handle changed to void * to allow 64bit support ++ * ++ * Caller (flush_ber) must hold conn->c_pdumutex + */ + static int + slapd_poll(void *handle, int output) +-- +2.17.2 + diff --git a/SOURCES/0031-Issue-50123-with_tmpfiles_d-is-associated-with-syste.patch b/SOURCES/0031-Issue-50123-with_tmpfiles_d-is-associated-with-syste.patch new file mode 100644 index 0000000..d416959 --- /dev/null +++ b/SOURCES/0031-Issue-50123-with_tmpfiles_d-is-associated-with-syste.patch @@ -0,0 +1,34 @@ +From 957145f661d074aaca1a7aa325fa6943dda80b0c Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Thu, 9 May 2019 15:01:22 +0200 +Subject: [PATCH] Issue 50123 - with_tmpfiles_d is associated with systemd + +Bug Description: +Tests fail on 1.3.9 because of the missing tmpfiles_d +in defaults.inf + +Fix Description: +Add tmpfiles_d to defaults.inf + +Fixes https://pagure.io/389-ds-base/issue/50123 + +Reviewed by: mreynolds (Thanks!) 
+--- + ldap/admin/src/defaults.inf.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in +index eff88f238..91486c8e3 100644 +--- a/ldap/admin/src/defaults.inf.in ++++ b/ldap/admin/src/defaults.inf.in +@@ -38,6 +38,7 @@ pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid + inst_dir = @serverdir@/slapd-{instance_name} + plugin_dir = @serverplugindir@ + system_schema_dir = @systemschemadir@ ++tmpfiles_d = @with_tmpfiles_d@ + + ; These values can be altered in an installation of ds + user = dirsrv +-- +2.17.2 + diff --git a/SOURCES/0032-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch b/SOURCES/0032-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch new file mode 100644 index 0000000..a6b4931 --- /dev/null +++ b/SOURCES/0032-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch @@ -0,0 +1,186 @@ +From c2637e437d5b9fa33410856d2059c2921a90fdac Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 7 Jun 2019 09:21:31 -0400 +Subject: [PATCH] Issue 50426 - nsSSL3Ciphers is limited to 1024 characters + +Bug Description: There was a hardcoded buffer for processing TLS ciphers. + Anything over 1024 characters was truncated and was not + applied. + +Fix Description: Don't use a fixed size buffer and just use the entire + string. When printing errors about invalid format then + we must use a fixed sized buffer, but we will truncate + that log value as to not exceed the ssl logging function's + buffer, and still output a useful message. + +ASAN approved + +https://pagure.io/389-ds-base/issue/50426 + +Reviewed by: firstyear, tbordaz, and spichugi (Thanks!!!) 
+ +(cherry picked from commit 22f2f9a1502e63bb169b7d599b5a3b35ddb31b8a) +--- + dirsrvtests/tests/suites/tls/cipher_test.py | 51 +++++++++++++++++++++ + ldap/servers/slapd/ssl.c | 34 ++++++-------- + 2 files changed, 66 insertions(+), 19 deletions(-) + create mode 100644 dirsrvtests/tests/suites/tls/cipher_test.py + +diff --git a/dirsrvtests/tests/suites/tls/cipher_test.py b/dirsrvtests/tests/suites/tls/cipher_test.py +new file mode 100644 +index 000000000..058931046 +--- /dev/null ++++ b/dirsrvtests/tests/suites/tls/cipher_test.py +@@ -0,0 +1,51 @@ ++import pytest ++import os ++from lib389.config import Encryption ++from lib389.topologies import topology_st as topo ++ ++ ++def test_long_cipher_list(topo): ++ """Test a long cipher list, and makre sure it is not truncated ++ ++ :id: bc400f54-3966-49c8-b640-abbf4fb2377d ++ :setup: Standalone Instance ++ :steps: ++ 1. Set nsSSL3Ciphers to a very long list of ciphers ++ 2. Ciphers are applied correctly ++ :expectedresults: ++ 1. Success ++ 2. 
Success ++ """ ++ ENABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384::AES-GCM::AEAD::256" ++ DISABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256::AES-GCM::AEAD::128" ++ CIPHER_LIST = ( ++ "-all,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5," ++ "-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," ++ "-TLS_RSA_WITH_RC4_128_MD5,-TLS_RSA_WITH_RC4_128_SHA,-TLS_RSA_WITH_3DES_EDE_CBC_SHA," ++ "-TLS_RSA_WITH_DES_CBC_SHA,-SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,-SSL_RSA_FIPS_WITH_DES_CBC_SHA," ++ "-TLS_RSA_EXPORT_WITH_RC4_40_MD5,-TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,-TLS_RSA_WITH_NULL_MD5," ++ "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA," ++ "-SSL_FORTEZZA_DMS_WITH_RC4_128_SHA,-SSL_FORTEZZA_DMS_WITH_NULL_SHA,-TLS_DHE_DSS_WITH_DES_CBC_SHA," ++ "-TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,-TLS_DHE_RSA_WITH_DES_CBC_SHA,-TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA," ++ "+TLS_RSA_WITH_AES_128_CBC_SHA,-TLS_DHE_DSS_WITH_AES_128_CBC_SHA,-TLS_DHE_RSA_WITH_AES_128_CBC_SHA," ++ "+TLS_RSA_WITH_AES_256_CBC_SHA,-TLS_DHE_DSS_WITH_AES_256_CBC_SHA,-TLS_DHE_RSA_WITH_AES_256_CBC_SHA," ++ "-TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,-TLS_DHE_DSS_WITH_RC4_128_SHA,-TLS_ECDHE_RSA_WITH_RC4_128_SHA," ++ "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," ++ "-TLS_RSA_WITH_RC4_128_MD5,-TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,-TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA," ++ "-TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,+TLS_AES_128_GCM_SHA256,+TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" ++ ) ++ ++ topo.standalone.enable_tls() ++ enc = Encryption(topo.standalone) ++ enc.set('nsSSL3Ciphers', CIPHER_LIST) ++ topo.standalone.restart() ++ enabled_ciphers = enc.get_attr_vals_utf8('nssslenabledciphers') ++ assert ENABLED_CIPHER in enabled_ciphers ++ assert DISABLED_CIPHER not in enabled_ciphers ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ 
# -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main(["-s", CURRENT_FILE]) +diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c +index b8eba2da4..ed054db44 100644 +--- a/ldap/servers/slapd/ssl.c ++++ b/ldap/servers/slapd/ssl.c +@@ -95,7 +95,6 @@ static char *configDN = "cn=encryption,cn=config"; + #define CIPHER_SET_ALLOWWEAKDHPARAM 0x200 /* allowWeakDhParam is on */ + #define CIPHER_SET_DISALLOWWEAKDHPARAM 0x400 /* allowWeakDhParam is off */ + +- + #define CIPHER_SET_ISDEFAULT(flag) \ + (((flag)&CIPHER_SET_DEFAULT) ? PR_TRUE : PR_FALSE) + #define CIPHER_SET_ISALL(flag) \ +@@ -689,10 +688,12 @@ _conf_setciphers(char *setciphers, int flags) + active = 0; + break; + default: +- PR_snprintf(err, sizeof(err), "invalid ciphers <%s>: format is " +- "+cipher1,-cipher2...", +- raw); +- return slapi_ch_strdup(err); ++ if (strlen(raw) > MAGNUS_ERROR_LEN) { ++ PR_snprintf(err, sizeof(err) - 3, "%s...", raw); ++ return slapi_ch_smprintf("invalid ciphers <%s>: format is +cipher1,-cipher2...", err); ++ } else { ++ return slapi_ch_smprintf("invalid ciphers <%s>: format is +cipher1,-cipher2...", raw); ++ } + } + if ((t = strchr(setciphers, ','))) + *t++ = '\0'; +@@ -1689,7 +1690,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) + PRUint16 NSSVersionMax = enabledNSSVersions.max; + char mymin[VERSION_STR_LENGTH], mymax[VERSION_STR_LENGTH]; + char newmax[VERSION_STR_LENGTH]; +- char cipher_string[1024]; + int allowweakcipher = CIPHER_SET_DEFAULTWEAKCIPHER; + int_fast16_t renegotiation = (int_fast16_t)SSL_RENEGOTIATE_REQUIRES_XTN; + +@@ -1730,21 +1730,17 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) + "Ignoring it and set it to default.", val, configDN); + } + } +- slapi_ch_free((void **)&val); ++ slapi_ch_free_string(&val); + + /* Set SSL cipher preferences */ +- *cipher_string = 0; +- if (ciphers && (*ciphers) && PL_strcmp(ciphers, "blank")) +- PL_strncpyz(cipher_string, ciphers, sizeof(cipher_string)); +- slapi_ch_free((void 
**)&ciphers); +- +- if (NULL != (val = _conf_setciphers(cipher_string, allowweakcipher))) { ++ if (NULL != (val = _conf_setciphers(ciphers, allowweakcipher))) { + errorCode = PR_GetError(); + slapd_SSL_warn("Failed to set SSL cipher " + "preference information: %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", + val, errorCode, slapd_pr_strerror(errorCode)); +- slapi_ch_free((void **)&val); ++ slapi_ch_free_string(&val); + } ++ slapi_ch_free_string(&ciphers); + freeConfigEntry(&e); + + /* Import pr fd into SSL */ +@@ -1815,12 +1811,12 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) + activation = slapi_entry_attr_get_charptr(e, "nssslactivation"); + if ((!activation) || (!PL_strcasecmp(activation, "off"))) { + /* this family was turned off, goto next */ +- slapi_ch_free((void **)&activation); ++ slapi_ch_free_string(&activation); + freeConfigEntry(&e); + continue; + } + +- slapi_ch_free((void **)&activation); ++ slapi_ch_free_string(&activation); + + token = slapi_entry_attr_get_charptr(e, "nsssltoken"); + personality = slapi_entry_attr_get_charptr(e, "nssslpersonalityssl"); +@@ -1837,8 +1833,8 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) + "family information. 
Missing nsssltoken or" + "nssslpersonalityssl in %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", + *family, errorCode, slapd_pr_strerror(errorCode)); +- slapi_ch_free((void **)&token); +- slapi_ch_free((void **)&personality); ++ slapi_ch_free_string(&token); ++ slapi_ch_free_string(&personality); + freeConfigEntry(&e); + continue; + } +@@ -1865,7 +1861,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) + "private key for cert %s of family %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", + cert_name, *family, + errorCode, slapd_pr_strerror(errorCode)); +- slapi_ch_free((void **)&personality); ++ slapi_ch_free_string(&personality); + CERT_DestroyCertificate(cert); + cert = NULL; + freeConfigEntry(&e); +-- +2.17.2 + diff --git a/SOURCES/0033-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch b/SOURCES/0033-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch new file mode 100644 index 0000000..f9de876 --- /dev/null +++ b/SOURCES/0033-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch @@ -0,0 +1,46 @@ +From 335f2a35433a949fc30101655a40940bc6ecec12 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Wed, 15 May 2019 17:46:14 +0200 +Subject: [PATCH] Ticket 50329 - (2nd) Possible Security Issue: DOS due to + ioblocktimeout not applying to TLS + +Bug Description: + A secure socket is configured in blocking mode. If an event + is detected on a secure socket a worker tries to receive the request. + If handshake occurs during the read, it can hang longer than + ioblocktimeout because it takes into account the socket option + rather than the timeout used for the ssl_Recv + +Fix Description: + The fix is specific to secure socket and set this socket option + to do non blocking IO. + +https://pagure.io/389-ds-base/issue/50329 + +Reviewed by: ? 
+ +Platforms tested: F28, RHEL7.6 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/slapd/daemon.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 4841a8a5c..afe0fb737 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -3193,7 +3193,7 @@ configure_pr_socket(PRFileDesc **pr_socket, int secure, int local) + + if (secure) { + pr_socketoption.option = PR_SockOpt_Nonblocking; +- pr_socketoption.value.non_blocking = 0; ++ pr_socketoption.value.non_blocking = 1; + if (PR_SetSocketOption(*pr_socket, &pr_socketoption) == PR_FAILURE) { + PRErrorCode prerr = PR_GetError(); + slapi_log_err(SLAPI_LOG_ERR, +-- +2.21.0 + diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index 162cab2..caea924 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -38,8 +38,8 @@ Summary: 389 Directory Server (%{variant}) Name: 389-ds-base -Version: 1.3.8.4 -Release: %{?relprefix}25%{?prerel}.1%{?dist} +Version: 1.3.9.1 +Release: %{?relprefix}10%{?prerel}%{?dist} License: GPLv3+ URL: https://www.port389.org/ Group: System Environment/Daemons @@ -143,39 +143,42 @@ Requires: gperftools-libs %endif Source0: https://releases.pagure.org/389-ds-base/%{name}-%{version}%{?prerel}.tar.bz2 -# 389-ds-git.sh should be used to generate the source tarball from git Source1: %{name}-git.sh Source2: %{name}-devel.README -Patch00: 0000-Ticket-49830-Import-fails-if-backend-name-is-default.patch -Patch01: 0001-Ticket-48818-For-a-replica-bindDNGroup-should-be-fet.patch -Patch02: 0002-Ticket-49546-Fix-issues-with-MIB-file.patch -Patch03: 0003-Ticket-49840-ds-replcheck-command-returns-traceback-.patch -Patch04: 0004-Ticket-49893-disable-nunc-stans-by-default.patch -Patch05: 0005-Ticket-49890-ldapsearch-with-server-side-sort-crashe.patch -Patch06: 0006-Bug-1614820-Crash-in-vslapd_log_emergency_error.patch -Patch07: 0007-Ticket-49932-Crash-in-delete_passwdPolicy-when-persi.patch 
-Patch08: 0008-Bug-1624004-potential-denial-of-service-attack.patch -Patch09: 0009-Bug-1624004-fix-regression-in-empty-attribute-list.patch -Patch10: 0010-Ticket-49968-Confusing-CRITICAL-message-list_candida.patch -Patch11: 0011-Ticket-49967-entry-cache-corruption-after-failed-MOD.patch -Patch12: 0012-Ticket-49958-extended-search-fail-to-match-entries.patch -Patch13: 0013-Ticket-49915-Master-ns-slapd-had-100-CPU-usage-after.patch -Patch14: 0014-Ticket-49950-PassSync-not-setting-pwdLastSet-attribu.patch -Patch15: 0015-Ticket-49915-fix-compiler-warnings.patch -Patch16: 0016-Ticket-49915-fix-compiler-warnings-2nd.patch -Patch17: 0017-Ticket-49618-Increase-cachememsize-and-dncachememsize.patch -Patch18: 0018-Ticket-50020-during-MODRDN-referential-integrity-can.patch -Patch19: 0019-Ticket-49543-fix-certmap-dn-comparison.patch -Patch20: 0020-Ticket-50117-after-certain-failed-import-operation-i.patch -Patch21: 0021-Ticket-49540-Fix-compiler-warning-in-ldif2ldbm.patch -Patch22: 0022-Ticket-50078-cannot-add-cenotaph-in-read-only-consum.patch -Patch23: 0023-Ticket-50177-import-task-should-not-be-deleted-too-r.patch -Patch24: 0024-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch -Patch26: 0026-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch -Patch25: 0025-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch -Patch27: 0027-BZ1518320-entry-cache-crash-fix.patch -Patch28: 0028-BZ1518320-entry-cache-crash-fix.patch -Patch29: 0029-BZ1518320-entry-cache-crash-fix-cherry-pick-error.patch +Patch00: 0000-Ticket-50236-memberOf-should-be-more-robust.patch +Patch01: 0001-Ticket-50238-Failed-modrdn-can-corrupt-entry-cache.patch +Patch02: 0002-Ticket-50232-export-creates-not-importable-ldif-file.patch +Patch03: 0003-Ticket-50234-one-level-search-returns-not-matching-e.patch +Patch04: 0004-Issue-50091-shadowWarning-is-not-generated-if-passwo.patch +Patch05: 0005-Ticket-50091-shadowWarning-is-not-generated-if-passw.patch +Patch06: 
0006-Ticket-50260-backend-txn-plugins-can-corrupt-entry-c.patch +Patch07: 0007-Ticket-50077-Do-not-automatically-turn-automember-po.patch +Patch08: 0008-Ticket-50282-OPERATIONS-ERROR-when-trying-to-delete-.patch +Patch09: 0009-Ticket-49561-MEP-plugin-upon-direct-op-failure-will-.patch +Patch10: 0010-Ticket-50260-Invalid-cache-flushing-improvements.patch +Patch11: 0011-Ticket-50265-the-warning-about-skew-time-could-last-.patch +Patch12: 0012-Ticket-50063-Crash-after-attempting-to-restore-a-sin.patch +Patch13: 0013-Ticket-49946-upgrade-of-389-ds-base-could-remove-rep.patch +Patch14: 0014-Ticket-49873-Contention-on-virtual-attribute-lookup.patch +Patch15: 0015-Ticket-49958-extended-search-fail-to-match-entries.patch +Patch16: 0016-Ticket-50028-ds-replcheck-y-option-throws-usage-erro.patch +#Patch17: 0017-Ticket-50329-Possible-Security-Issue-DOS-due-to-iobl.patch +Patch18: 0018-Ticket-49990-Increase-the-default-FD-limits.patch +Patch19: 0019-Ticket-50053-Subtree-password-policy-overrides-a-use.patch +Patch20: 0020-Ticket-49866-Add-passwordSendExpiringTime-to-objectc.patch +Patch21: 0021-Ticket-50013-Log-warn-instead-of-ERR-when-aci-target.patch +Patch22: 0022-Ticket-49997-RFE-ds-replcheck-could-validate-suffix-.patch +Patch23: 0023-Ticket-50363-ds-replcheck-incorrectly-reports-error-.patch +Patch24: 0024-Ticket-50370-CleanAllRUV-task-crashing-during-server.patch +Patch25: 0025-Ticket-50378-ACI-s-with-IPv4-and-IPv6-bind-rules-do-.patch +Patch26: 0026-Ticket-50396-Crash-in-PAM-plugin-when-user-does-not-.patch +Patch27: 0027-Ticket-50393-maxlogsperdir-accepting-negative-values.patch +Patch28: 0028-Issue-49754-Instance-created-using-dscreate-can-t-be.patch +Patch29: 0029-Ticket-50413-ds-replcheck-Always-display-the-Result-.patch +Patch30: 0030-Ticket-50389-ns-slapd-craches-while-two-threads-are-.patch +Patch31: 0031-Issue-50123-with_tmpfiles_d-is-associated-with-syste.patch +Patch32: 0032-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch +Patch33: 
0033-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch %description 389 Directory Server is an LDAPv3 compliant server. The base package includes @@ -340,13 +343,18 @@ fi # Create dirsrv user and group (if needed) USERNAME="dirsrv" +ALLOCATED_UID=389 GROUPNAME="dirsrv" +ALLOCATED_GID=389 HOMEDIR="/usr/share/dirsrv" -if ! getent group $GROUPNAME >/dev/null ; then - /usr/sbin/groupadd -f -r $GROUPNAME -fi + +getent group $GROUPNAME >/dev/null || /usr/sbin/groupadd -f -g $ALLOCATED_GID -r $GROUPNAME if ! getent passwd $USERNAME >/dev/null ; then - /usr/sbin/useradd -r -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME + if ! getent passwd $ALLOCATED_UID >/dev/null ; then + /usr/sbin/useradd -r -u $ALLOCATED_UID -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME + else + /usr/sbin/useradd -r -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME + fi fi # Reload our sysctl before we restart (if we can) @@ -523,43 +531,97 @@ fi %{_sysconfdir}/%{pkgname}/dirsrvtests %changelog -* Wed Jul 3 2019 Mark Reynolds - 1.3.8.4-25.1 -- Bump version to 1.3.8.4-25.1 -- Resolves: Bug 1718689 - dse.ldif strip-off string after 1023 character (missing patch file) - -* Fri Jun 21 2019 Mark Reynolds - 1.3.8.4-25 -- Bump version to 1.3.8.4-25 -- Resolves: Bug 1722828 - referint update should discard any changes if mep update fails -- Resolves: Bug 1718689 - dse.ldif strip-off string after 1023 character -- Resolves: Bug 1719720 - CVE-2019-3883 389-ds-base: DoS via hanging secured connections - -* Wed Jun 19 2019 Mark Reynolds - 1.3.8.4-24 -- Bump version to 1.3.8.4-24 -- Resolves: Bug 1718184 - segfault when using pam passthru and addn plugins together - -* Wed Feb 6 2019 Mark Reynolds - 1.3.8.4-23 -- Bump version to 1.3.8.4-23 -- Resolves: Bug 1672173 - import task should not be deleted after import finishes to be able to query the status -- Resolves: Bug 1672177 - after certain failed import 
operation, impossible to replay an import operation. -- Resolves: Bug 1672179 - cannot add cenotaph in read only consumer - -* Mon Dec 17 2018 Mark Reynolds - 1.3.8.4-22 -- Bump version to 1.3.8.4-22 -- Resolves: Bug 1660120 - certmap fails when Issuer DN has comma in name - -* Mon Dec 17 2018 Mark Reynolds - 1.3.8.4-21 -- Bump version to 1.3.8.4-21 -- Resolves: Bug 1659510 - during MODRDN referential integrity can fail erronously while updating large groups -- Resolves: Bug 1659936 - After RHEL 7.6 HTB update, unable to set nsslapd-cachememsize (RHDS 10) to custom value - -* Tue Dec 4 2018 Mark Reynolds - 1.3.8.4-20 -- Bump version to 1.3.8.4-20 -- Resolves: Bug 1645197 - Fix compiler warnings - -* Tue Dec 4 2018 Mark Reynolds - 1.3.8.4-19 -- Bump version to 1.3.8.4-19 -- Resolves: Bug 1653820 - PassSync not setting pwdLastSet attribute in Active Directory after Pw update from LDAP sync for normal user -- Resolves: Bug 1645197 - on-line re-initialization hangs +* Thu Jun 13 2019 Mark Reynolds - 1.3.9.1-10 +- Bump version to 1.3.9.1-10 +- Resolves: Bug 1668457 - CVE-2019-3883 389-ds-base: DoS via hanging secured connections + +* Fri Jun 7 2019 Mark Reynolds - 1.3.9.1-9 +- Bump version to 1.3.9.1-9 +- Resolves: Bug 1713361 - Update defaults.inf (fix missing first commit) +- Resolves: Bug 1716267 - dse.ldif strip-off string after 1023 character + +* Wed Jun 5 2019 Mark Reynolds - 1.3.9.1-8 +- Bump version to 1.3.9.1-8 +- Resolves: Bug 1702740 - ns-slapd craches on IPA Servers throughout the customer's topology +- Resolves: Bug 1715091 - ds-replcheck does not always print a Result summary +- Resolves: Bug 1713361 - Update defaults.inf +- Resolves: Bug 1708215 - maxlogsperdir accepting negative values +- Resolves: Bug 1701092 - segfault when using pam passthru and addn plugins together + +* Thu May 16 2019 Mark Reynolds - 1.3.9.1-7 +- Bump version to 1.3.9.1-7 +- Resolves: Bug 1704314 - [abrt] [faf] 389-ds-base: objset_find(): /usr/sbin/ns-slapd killed by 11 +- 
Resolves: Bug 1629055 - ds-replcheck unreliable, showing false positives +- Resolves: Bug 1710848 - ACI's with IPv4 and IPv6 bind rules do not work for IPv6 clients + +* Fri May 10 2019 Mark Reynolds - 1.3.9.1-6 +- Bump version to 1.3.9.1-6 +- Resolves: Bug 1705125 - ipa-replica-install with 389-ds-base-1.3.9.1-5.el7 +- Resolves: Bug 1643772 - ds-replcheck should validate suffix exists and it's replicated +- Resolves: Bug 1647133 - Log warn instead of ERR when aci target does not exist. +- Resolves: Bug 1639192 - Request to add passwordSendExpiringTime in password policy objectclass + +* Mon Apr 29 2019 Mark Reynolds - 1.3.9.1-5 +- Bump version to 1.3.9.1-5 +- Resolves: Bug 1630513 - Customer requesting -y option for ds-replcheck +- Resolves: Bug 1668457 - CVE-2019-3883 389-ds-base: DoS via hanging secured connections +- Resolves: Bug 1695014 - Clarify the ability to change the NOFILE limit +- Resolves: Bug 1615155 - extended search fails to match entries +- Resolves: Bug 1652984 - Subtree password policy overrides a user-defined password policy + +* Fri Mar 29 2019 Mark Reynolds - 1.3.9.1-4 +- Bump version to 1.3.9.1-4 +- Resolves: Bug 1601241 - ns-slapd - Crash when using bak2db.pl to restore a single database. +- Resolves: Bug 1623935 - upgrade of 389-ds-base could remove replication agreements. +- Resolves: Bug 1627846 - Contention on virtual attribute lookup (parts 2 & 3) + +* Fri Mar 22 2019 Mark Reynolds - 1.3.9.1-3 +- Bump version to 1.3.9.1-3 +- Resolves: Bug 1651279 - The dirsrv user is no longer created with uid 389 +- Resolves: Bug 1542469 - Why does the mep_origination entry toggle from tombstone/not_tombstone +- Resolves: Bug 1689313 - OPERATIONS ERROR when trying to delete a group with automember members +- Resolves: Bug 1673472 - the warning about skew time could last forever. 
+- Resolves: Bug 1680245 - Error in memberof-plugin cause failures in ipa group-mod
+- Resolves: Bug 1482596 - referint update should discard any changes if mep update fails
+
+* Fri Mar 15 2019 Mark Reynolds - 1.3.9.1-2
+- Bump version to 1.3.9.1-2
+- Resolves: Bug 1417340 - entry cache is not cleaned up if an operation is aborted
+- Resolves: Bug 1603140 - Export produces a non-importable ldif file
+- Resolves: Bug 1518320 - ns-slapd: crash in entrycache_add_int
+- Resolves: Bug 1680245 - Error in memberof-plugin cause failures in ipa group-mod
+- Resolves: Bug 1589144 - shadowWarning is not generated if passwordWarning is lower than 86400 seconds (1 day)
+
+* Wed Feb 20 2019 Mark Reynolds - 1.3.9.1-1
+- Bump version to 1.3.9.1-1
+- Resolves: Bug 1438144 - [RFE] Include autounmembering feature in IPA
+- Resolves: Bug 1645359 - Rebase 389-ds-base in RHEL 7.7 to 1.3.9
+- Resolves: Bug 1561769 - [RFE] revise the "transient error" message in a replication agreement's last update status
+- Resolves: Bug 1563999 - Is it possible for Directory server to reject the current password only?
+- Resolves: Bug 1597202 - PassSync not setting pwdLastSet attribute in Active Directory after Pw update from LDAP sync for normal user
+- Resolves: Bug 1601241 - ns-slapd - Crash when using bak2db.pl to restore a single database
+- Resolves: Bug 1629055 - ds-replcheck unreliable, showing false positives
+- Resolves: Bug 1630513 - Customer requesting -y option for ds-replcheck
+- Resolves: Bug 1639192 - Request to add passwordSendExpiringTime in password policy objectclass
+- Resolves: Bug 1643587 - default of "nsslapd-errorlog-maxlogsperdir: 1" causing huge log files
+- Resolves: Bug 1643772 - ds-replcheck should validate suffix exists and it's replicated
+- Resolves: Bug 1647133 - Log warn instead of ERR when aci target does not exist.
+- Resolves: Bug 1653163 - certmap fails when Issuer DN has comma in name
+- Resolves: Bug 1602001 - cannot add cenotaph in read only consumer
+- Resolves: Bug 1589144 - shadowWarning is not generated if passwordWarning is lower than 86400 seconds (1 day)
+- Resolves: Bug 1600631 - In replicated topology a single-valued attribute can diverge
+- Resolves: Bug 1626375 - on-line re-initialization hangs
+- Resolves: Bug 1627846 - Contention on virtual attribute lookup
+- Resolves: Bug 1642838 - after certain failed import operation, impossible to replay an import operation
+- Resolves: Bug 1647099 - audit logs do not capture the operation where nsslapd-lookthroughlimit is modified
+- Resolves: Bug 1648922 - during MODRDN referential integrity can fail erroneously while updating large groups
+- Resolves: Bug 1652984 - Subtree password policy overrides a user-defined password policy
+- Resolves: Bug 1663829 - import task should not be deleted after import finishes to be able to query the status.
+- Resolves: Bug 1623935 - upgrade of 389-ds-base could remove replication agreements
+- Resolves: Bug 1622049 - db2bak.pl does not work with LDAPS, but ok with STARTTLS
+- Resolves: Bug 1633488 - fixup-memberof.pl -D cn=<> -w - -b <> -P LDAP fails with ldap_start_tls error
+- Resolves: Bug 1451460 - error messages during ldif2db after enabling encryption on an attribute
+- Resolves: Bug 1627512 - After RHEL 7.6 HTB update, unable to set nsslapd-cachememsize (RHDS 10) to custom value

* Mon Oct 29 2018 Mark Reynolds - 1.3.8.4-18
- Bump version to 1.3.8.4-18